aggregation

Compatible implementations of various downsampling methods, together with an open interface for other downsampling methods.

AbstractAggregator

Bases: ABC

Source code in plotly_resampler/aggregation/aggregation_interface.py, lines 14-83
class AbstractAggregator(ABC):
    def __init__(
        self,
        x_dtype_regex_list: Optional[List[str]] = None,
        y_dtype_regex_list: Optional[List[str]] = None,
        **downsample_kwargs,
    ):
        """Constructor of AbstractSeriesAggregator.

        Parameters
        ----------
        x_dtype_regex_list: List[str], optional
            List containing the regex matching the supported datatypes for the x array,
            by default None.
        y_dtype_regex_list: List[str], optional
            List containing the regex matching the supported datatypes for the y array,
            by default None.
        downsample_kwargs: dict
            Additional kwargs passed to the downsample method.

        """
        self.x_dtype_regex_list = x_dtype_regex_list
        self.y_dtype_regex_list = y_dtype_regex_list
        self.downsample_kwargs = downsample_kwargs

    @staticmethod
    def _check_n_out(n_out: int) -> None:
        """Check if the n_out is valid."""
        assert isinstance(n_out, (int, np.integer))
        assert n_out > 0

    @staticmethod
    def _process_args(*args) -> Tuple[np.ndarray | None, np.ndarray]:
        """Process the args into the x and y arrays.

        If only y is passed, x is set to None.
        """
        assert len(args) in [1, 2], "Must pass either 1 or 2 arrays"
        x, y = (None, args[0]) if len(args) == 1 else args
        return x, y

    @staticmethod
    def _check_arr(arr: np.ndarray, regex_list: Optional[List[str]] = None):
        """Check if the array is valid."""
        assert isinstance(arr, np.ndarray), f"Expected np.ndarray, got {type(arr)}"
        assert arr.ndim == 1
        AbstractAggregator._supports_dtype(arr, regex_list)

    def _check_x_y(self, x: np.ndarray | None, y: np.ndarray) -> None:
        """Check if the x and y arrays are valid."""
        # Check x (if not None)
        if x is not None:
            self._check_arr(x, self.x_dtype_regex_list)
            assert x.shape == y.shape, "x and y must have the same shape"
        # Check y
        self._check_arr(y, self.y_dtype_regex_list)

    @staticmethod
    def _supports_dtype(arr: np.ndarray, dtype_regex_list: Optional[List[str]] = None):
        # base case
        if dtype_regex_list is None:
            return

        for dtype_regex_str in dtype_regex_list:
            m = re.compile(dtype_regex_str).match(str(arr.dtype))
            if m is not None:  # a match is found
                return
        raise ValueError(
            f"{arr.dtype} doesn't match with any regex in {dtype_regex_list}"
        )

__init__(x_dtype_regex_list=None, y_dtype_regex_list=None, **downsample_kwargs)

Constructor of AbstractAggregator.

Parameters:

- x_dtype_regex_list (Optional[List[str]], default None): List containing the regex matching the supported datatypes for the x array.
- y_dtype_regex_list (Optional[List[str]], default None): List containing the regex matching the supported datatypes for the y array.
- downsample_kwargs (default {}): Additional kwargs passed to the downsample method.
Source code in plotly_resampler/aggregation/aggregation_interface.py, lines 15-37
def __init__(
    self,
    x_dtype_regex_list: Optional[List[str]] = None,
    y_dtype_regex_list: Optional[List[str]] = None,
    **downsample_kwargs,
):
    """Constructor of AbstractSeriesAggregator.

    Parameters
    ----------
    x_dtype_regex_list: List[str], optional
        List containing the regex matching the supported datatypes for the x array,
        by default None.
    y_dtype_regex_list: List[str], optional
        List containing the regex matching the supported datatypes for the y array,
        by default None.
    downsample_kwargs: dict
        Additional kwargs passed to the downsample method.

    """
    self.x_dtype_regex_list = x_dtype_regex_list
    self.y_dtype_regex_list = y_dtype_regex_list
    self.downsample_kwargs = downsample_kwargs
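
The dtype gating shown above is purely regex-based: `_supports_dtype` matches `str(arr.dtype)` against each pattern and raises a `ValueError` when nothing matches. Below is a minimal, standalone sketch of that check; the regex list is illustrative, not the library's default.

```python
import re
import numpy as np

def supports_dtype(arr: np.ndarray, dtype_regex_list=None) -> bool:
    """Return True if str(arr.dtype) matches any regex in dtype_regex_list."""
    if dtype_regex_list is None:
        return True  # no restriction configured
    return any(re.compile(p).match(str(arr.dtype)) for p in dtype_regex_list)

# Illustrative y-dtype patterns (numeric, category, bool)
y_regexes = [r"float\d*", r"int\d*", r"uint\d*", "category", "bool"]
print(supports_dtype(np.zeros(3, dtype="float64"), y_regexes))  # True
print(supports_dtype(np.array(["a", "b"]), y_regexes))          # False (dtype '<U1')
```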

AbstractGapHandler

Bases: ABC

Source code in plotly_resampler/aggregation/gap_handler_interface.py, lines 13-103
class AbstractGapHandler(ABC):
    def __init__(self, fill_value: Optional[float] = None):
        """Constructor of AbstractGapHandler.

        Parameters
        ----------
        fill_value: float, optional
            The value to fill the gaps with, by default None.
            Note that setting this value to 0 for filled area plots is particularly
            useful.

        """
        self.fill_value = fill_value

    @abstractmethod
    def _get_gap_mask(self, x_agg: np.ndarray) -> Optional[np.ndarray]:
        """Get a boolean mask indicating the indices where there are gaps.

        If you require custom gap handling, you can implement this method to return a
        boolean mask indicating the indices where there are gaps.

        Parameters
        ----------
        x_agg: np.ndarray
            The x array. This is used to determine the gaps.

        Returns
        -------
        Optional[np.ndarray]
            A boolean mask indicating the indices where there are gaps. If there are no
            gaps, None is returned.

        """
        pass

    def insert_fill_value_between_gaps(
        self,
        x_agg: np.ndarray,
        y_agg: np.ndarray,
        idxs: np.ndarray,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Insert the fill_value in the y_agg array where there are gaps.

        Gaps are determined by the x_agg array. The `_get_gap_mask` method is used to
        determine a boolean mask indicating the indices where there are gaps.

        Parameters
        ----------
        x_agg: np.ndarray
            The x array. This is used to determine the gaps.
        y_agg: np.ndarray
            The y array. A copy of this array will be expanded with fill_values where
            there are gaps.
        idxs: np.ndarray
            The index array. This is relevant for aggregators that perform data point
            selection (e.g., max, min, etc.) - this array will be expanded with the
            same indices where there are gaps.

        Returns
        -------
        Tuple[np.ndarray, np.ndarray]
            The expanded y_agg array and the expanded idxs array respectively.

        """
        gap_mask = self._get_gap_mask(x_agg)
        if gap_mask is None:
            # no gaps are found, nothing to do
            return y_agg, idxs

        # An array filled with 1s and 2s, where 2 indicates a large gap mask
        # (i.e., that index will be repeated twice)
        repeats = np.ones(x_agg.shape, dtype="int") + gap_mask

        # use the repeats to expand the idxs, and agg_y array
        idx_exp_nan = np.repeat(idxs, repeats)
        y_agg_exp_nan = np.repeat(y_agg, repeats)

        # only float arrays can contain NaN values
        if issubclass(y_agg_exp_nan.dtype.type, np.integer) or issubclass(
            y_agg_exp_nan.dtype.type, np.bool_
        ):
            y_agg_exp_nan = y_agg_exp_nan.astype("float")

        # Set the NaN values
        # We add the gap index offset (via the np.arange) to the indices to account for
        # the repeats (i.e., expanded y_agg array).
        y_agg_exp_nan[
            np.where(gap_mask)[0] + np.arange(gap_mask.sum())
        ] = self.fill_value

        return y_agg_exp_nan, idx_exp_nan

__init__(fill_value=None)

Constructor of AbstractGapHandler.

Parameters:

- fill_value (Optional[float], default None): The value to fill the gaps with. Note that setting this value to 0 for filled area plots is particularly useful.
Source code in plotly_resampler/aggregation/gap_handler_interface.py, lines 14-25
def __init__(self, fill_value: Optional[float] = None):
    """Constructor of AbstractGapHandler.

    Parameters
    ----------
    fill_value: float, optional
        The value to fill the gaps with, by default None.
        Note that setting this value to 0 for filled area plots is particularly
        useful.

    """
    self.fill_value = fill_value

insert_fill_value_between_gaps(x_agg, y_agg, idxs)

Insert the fill_value in the y_agg array where there are gaps.

Gaps are determined by the x_agg array. The _get_gap_mask method is used to determine a boolean mask indicating the indices where there are gaps.

Parameters:

- x_agg (np.ndarray, required): The x array. This is used to determine the gaps.
- y_agg (np.ndarray, required): The y array. A copy of this array will be expanded with fill_values where there are gaps.
- idxs (np.ndarray, required): The index array. This is relevant for aggregators that perform data point selection (e.g., max, min, etc.); this array will be expanded with the same indices where there are gaps.

Returns:

- Tuple[np.ndarray, np.ndarray]: The expanded y_agg array and the expanded idxs array, respectively.

Source code in plotly_resampler/aggregation/gap_handler_interface.py, lines 48-103
def insert_fill_value_between_gaps(
    self,
    x_agg: np.ndarray,
    y_agg: np.ndarray,
    idxs: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
    """Insert the fill_value in the y_agg array where there are gaps.

    Gaps are determined by the x_agg array. The `_get_gap_mask` method is used to
    determine a boolean mask indicating the indices where there are gaps.

    Parameters
    ----------
    x_agg: np.ndarray
        The x array. This is used to determine the gaps.
    y_agg: np.ndarray
        The y array. A copy of this array will be expanded with fill_values where
        there are gaps.
    idxs: np.ndarray
        The index array. This is relevant for aggregators that perform data point
        selection (e.g., max, min, etc.) - this array will be expanded with the
        same indices where there are gaps.

    Returns
    -------
    Tuple[np.ndarray, np.ndarray]
        The expanded y_agg array and the expanded idxs array respectively.

    """
    gap_mask = self._get_gap_mask(x_agg)
    if gap_mask is None:
        # no gaps are found, nothing to do
        return y_agg, idxs

    # An array filled with 1s and 2s, where 2 indicates a large gap mask
    # (i.e., that index will be repeated twice)
    repeats = np.ones(x_agg.shape, dtype="int") + gap_mask

    # use the repeats to expand the idxs, and agg_y array
    idx_exp_nan = np.repeat(idxs, repeats)
    y_agg_exp_nan = np.repeat(y_agg, repeats)

    # only float arrays can contain NaN values
    if issubclass(y_agg_exp_nan.dtype.type, np.integer) or issubclass(
        y_agg_exp_nan.dtype.type, np.bool_
    ):
        y_agg_exp_nan = y_agg_exp_nan.astype("float")

    # Set the NaN values
    # We add the gap index offset (via the np.arange) to the indices to account for
    # the repeats (i.e., expanded y_agg array).
    y_agg_exp_nan[
        np.where(gap_mask)[0] + np.arange(gap_mask.sum())
    ] = self.fill_value

    return y_agg_exp_nan, idx_exp_nan
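
To make the gap-insertion mechanics concrete, here is a hedged sketch of a custom gap handler that flags a gap whenever consecutive x values differ by more than a fixed threshold (the built-in MedDiffGapHandler further below uses a median-based threshold instead). The import path is the one listed under "Source code in" above.

```python
from typing import Optional

import numpy as np

from plotly_resampler.aggregation.gap_handler_interface import AbstractGapHandler

class FixedThresholdGapHandler(AbstractGapHandler):
    """Illustrative gap handler: flag a gap where consecutive x values differ by more than `threshold`."""

    def __init__(self, threshold: float, fill_value: Optional[float] = None):
        super().__init__(fill_value)
        self.threshold = threshold

    def _get_gap_mask(self, x_agg: np.ndarray) -> Optional[np.ndarray]:
        gap_mask = np.diff(x_agg, prepend=x_agg[0]) > self.threshold
        return gap_mask if gap_mask.any() else None

x = np.array([0, 1, 2, 10, 11, 12], dtype=float)  # gap between 2 and 10
y = np.arange(6, dtype=float)
idxs = np.arange(6)

gh = FixedThresholdGapHandler(threshold=5, fill_value=np.nan)
y_exp, idx_exp = gh.insert_fill_value_between_gaps(x, y, idxs)
print(y_exp)    # [ 0.  1.  2. nan  3.  4.  5.]
print(idx_exp)  # [0 1 2 3 3 4 5]
```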

EveryNthPoint

Bases: DataPointSelector

Naive (but fast) aggregator method which returns every N’th point.

Note

This downsampler supports all dtypes.

Source code in plotly_resampler/aggregation/aggregators.py, lines 267-280
class EveryNthPoint(DataPointSelector):
    """Naive (but fast) aggregator method which returns every N'th point.

    !!! note
        This downsampler supports all dtypes.
    """

    def _arg_downsample(
        self,
        x: np.ndarray | None,
        y: np.ndarray,
        n_out: int,
    ) -> np.ndarray:
        return EveryNthDownsampler().downsample(y, n_out=n_out)
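
Conceptually, every-N'th-point selection amounts to a strided index. A rough, illustrative numpy equivalent (not the exact EveryNthDownsampler implementation from tsdownsample):

```python
import numpy as np

def every_nth_indices(y: np.ndarray, n_out: int) -> np.ndarray:
    """Pick indices with a fixed stride so that roughly n_out points remain."""
    step = max(1, len(y) // n_out)
    return np.arange(0, len(y), step)

y = np.random.randn(10_000)
idxs = every_nth_indices(y, n_out=100)
print(idxs.shape, idxs[:5])  # (100,) [  0 100 200 300 400]
```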

FuncAggregator

Bases: DataAggregator

Aggregator instance which uses the passed aggregation func.

Warning

The user has total control over which aggregation_func is passed to this method; hence, the user should be careful not to make copies of the data, nor write to the data. Furthermore, the user should beware of performance issues when using more complex aggregation functions.

Attention

The user has total control over which aggregation_func is passed to this method; hence, it is the user's responsibility to handle categorical and bool-based data types.

Source code in plotly_resampler/aggregation/aggregators.py, lines 283-374
class FuncAggregator(DataAggregator):
    """Aggregator instance which uses the passed aggregation func.

    !!! warning

        The user has total control over which `aggregation_func` is passed to this
        method; hence, the user should be careful not to make copies of the data, nor
        write to the data. Furthermore, the user should beware of performance issues
        when using more complex aggregation functions.

    !!! warning "Attention"

        The user has total control over which `aggregation_func` is passed to this
        method; hence, it is the user's responsibility to handle categorical and
        bool-based data types.

    """

    def __init__(
        self,
        aggregation_func,
        x_dtype_regex_list=None,
        y_dtype_regex_list=None,
        **downsample_kwargs,
    ):
        """
        Parameters
        ----------
        aggregation_func: Callable
            The aggregation function which will be applied on each bin.

        """
        self.aggregation_func = aggregation_func
        super().__init__(x_dtype_regex_list, y_dtype_regex_list, **downsample_kwargs)

    def _aggregate(
        self,
        x: np.ndarray | None,
        y: np.ndarray,
        n_out: int,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Aggregate the data using the object's aggregation function.

        Parameters
        ----------
        x: np.ndarray | None
            The x-values of the data. Can be None if no x-values are available.
        y: np.ndarray
            The y-values of the data.
        n_out: int
            The number of output data points.
        **kwargs
            Additional keyword arguments, which are passed to the aggregation function.

        Returns
        -------
        Tuple[np.ndarray, np.ndarray]
            The aggregated x & y values.
            If `x` is None, then the indices of the first element of each bin are
            returned as x-values.

        """
        # Create an index-estimation for real-time data
        # Add one to the index so it's pointed at the end of the window
        # Note: this can be adjusted to .5 to center the data
        # Multiply it with the group size to get the real index-position
        # TODO: add option to select start / middle / end as index
        if x is None:
            # equidistant index
            idxs = np.linspace(0, len(y), n_out + 1).astype(int)
        else:
            xdt = x.dtype
            if np.issubdtype(xdt, np.datetime64) or np.issubdtype(xdt, np.timedelta64):
                x = x.view("int64")
            # Thanks to `linspace`, the data is evenly distributed over the index-range
            # The searchsorted function returns the index positions
            idxs = np.searchsorted(x, np.linspace(x[0], x[-1], n_out + 1))

        y_agg = np.array(
            [
                self.aggregation_func(y[t0:t1], **self.downsample_kwargs)
                for t0, t1 in zip(idxs[:-1], idxs[1:])
            ]
        )

        if x is not None:
            x_agg = x[idxs[:-1]]
        else:
            # x is None -> return the indices of the first element of each bin
            x_agg = idxs[:-1]

        return x_agg, y_agg

__init__(aggregation_func, x_dtype_regex_list=None, y_dtype_regex_list=None, **downsample_kwargs)

Parameters:

- aggregation_func (Callable, required): The aggregation function which will be applied on each bin.
Source code in plotly_resampler/aggregation/aggregators.py, lines 301-316
def __init__(
    self,
    aggregation_func,
    x_dtype_regex_list=None,
    y_dtype_regex_list=None,
    **downsample_kwargs,
):
    """
    Parameters
    ----------
    aggregation_func: Callable
        The aggregation function which will be applied on each bin.

    """
    self.aggregation_func = aggregation_func
    super().__init__(x_dtype_regex_list, y_dtype_regex_list, **downsample_kwargs)
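
A usage sketch follows, assuming the public `aggregate` entry point inherited from DataAggregator behaves as it is called by PlotlyAggregatorParser further below (positional x and y, keyword n_out):

```python
import numpy as np

from plotly_resampler.aggregation import FuncAggregator  # assumed export location

x = np.arange(10_000)
y = np.sin(x / 100)

# Bin the series into 50 bins and take the mean of each bin.
mean_agg = FuncAggregator(aggregation_func=np.mean)
x_agg, y_agg = mean_agg.aggregate(x, y, n_out=50)
print(x_agg.shape, y_agg.shape)  # expected: (50,) (50,)
```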

LTTB

Bases: DataPointSelector

Largest Triangle Three Buckets (LTTB) aggregation method.

This is arguably the most widely used aggregation method. It is based on the effective area of a triangle (inspired from the line simplification domain). The algorithm has $O(n)$ complexity; however, for large datasets it can be much slower than other algorithms (e.g. MinMax) due to the higher cost of calculating the areas of triangles.

Thesis: https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf
Details on visual representativeness & stability: https://arxiv.org/abs/2304.00900

Tip

LTTB doesn’t scale super-well when moving to really large datasets, so when dealing with more than 1 million samples, you might consider using MinMaxLTTB.

Note

  • This class is mainly designed to operate on numerical data, as LTTB calculates distances on the values. When dealing with categories, the data is encoded into its numeric codes; these codes are the indices of the category array.
  • To aggregate category data with LTTB, your pd.Series must be of dtype 'category'.

    Tip: if there is an order in your categories, order them that way; LTTB uses the ordered category code values (see the bullet above) to calculate distances and make aggregation decisions.

    Code:

        >>> import pandas as pd
        >>> s = pd.Series(["a", "b", "c", "a"])
        >>> cat_type = pd.CategoricalDtype(categories=["b", "c", "a"], ordered=True)
        >>> s_cat = s.astype(cat_type)

  • LTTB has no downsample kwargs, as it cannot be parallelized. Instead, you can use the MinMaxLTTB downsampler, which performs minmax preselection (in parallel if configured so), followed by LTTB.

Source code in plotly_resampler/aggregation/aggregators.py, lines 36-95
class LTTB(DataPointSelector):
    """Largest Triangle Three Buckets (LTTB) aggregation method.

    This is arguably the most widely used aggregation method. It is based on the
    effective area of a triangle (inspired from the line simplification domain).
    The algorithm has $O(n)$ complexity; however, for large datasets, it can be much
    slower than other algorithms (e.g. MinMax) due to the higher cost of calculating
    the areas of triangles.

    Thesis: [https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf](https://skemman.is/bitstream/1946/15343/3/SS_MSthesis.pdf) <br/>
    Details on visual representativeness & stability: [https://arxiv.org/abs/2304.00900](https://arxiv.org/abs/2304.00900)

    !!! tip

        `LTTB` doesn't scale super-well when moving to really large datasets, so when
        dealing with more than 1 million samples, you might consider using
        [`MinMaxLTTB`][aggregation.aggregators.MinMaxLTTB].


    !!! note

        * This class is mainly designed to operate on numerical data as LTTB calculates
          distances on the values. <br/>
          When dealing with categories, the data is encoded into its numeric codes,
          these codes are the indices of the category array.
        * To aggregate category data with LTTB, your ``pd.Series`` must be of dtype
          'category'. <br/>

          **tip**:

          if there is an order in your categories, order them that way, LTTB uses
          the ordered category codes values (see bullet above) to calculate distances and
          make aggregation decisions. <br/>
          **code**:
            ```python
                >>> import pandas as pd
                >>> s = pd.Series(["a", "b", "c", "a"])
                >>> cat_type = pd.CategoricalDtype(categories=["b", "c", "a"], ordered=True)
                >>> s_cat = s.astype(cat_type)
            ```
        * `LTTB` has no downsample kwargs, as it cannot be parallelized. Instead, you can
          use the [`MinMaxLTTB`][aggregation.aggregators.MinMaxLTTB] downsampler, which performs
          minmax preselection (in parallel if configured so), followed by LTTB.

    """

    def __init__(self):
        super().__init__(
            y_dtype_regex_list=[rf"{dtype}\d*" for dtype in ("float", "int", "uint")]
            + ["category", "bool"],
        )
        self.downsampler = LTTBDownsampler()

    def _arg_downsample(
        self,
        x: np.ndarray | None,
        y: np.ndarray,
        n_out: int,
    ) -> np.ndarray:
        return self.downsampler.downsample(*_to_tsdownsample_args(x, y), n_out=n_out)
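
A usage sketch on numeric data; `arg_downsample` is the DataPointSelector entry point that PlotlyAggregatorParser calls further below, so the exact public signature is assumed from that usage:

```python
import numpy as np

from plotly_resampler.aggregation import LTTB  # assumed export location

x = np.arange(1_000_000)
y = np.random.randn(1_000_000).cumsum()

# Select (roughly) 1_000 representative data point indices, then slice the data.
idxs = LTTB().arg_downsample(x, y, n_out=1_000)
x_agg, y_agg = x[idxs], y[idxs]
```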

MedDiffGapHandler

Bases: AbstractGapHandler

Gap handling based on the median diff of the x_agg array.

Source code in plotly_resampler/aggregation/gap_handlers.py, lines 22-85
class MedDiffGapHandler(AbstractGapHandler):
    """Gap handling based on the median diff of the x_agg array."""

    def _calc_med_diff(self, x_agg: np.ndarray) -> Tuple[float, np.ndarray]:
        """Calculate the median diff of the x_agg array.

        As median is more robust to outliers than the mean, the median is used to define
        the gap threshold.

        This method performs a divide and conquer heuristic to calculate the median;
        1. divide the array into `n_blocks` blocks (with `n_blocks` = 128)
        2. calculate the mean of each block
        3. calculate the median of the means
        => This proves to be a good approximation of the median of the full array, while
              being much faster than calculating the median of the full array.
        """
        # remark: thanks to the prepend -> x_diff.shape === len(s)
        x_diff = np.diff(x_agg, prepend=x_agg[0])

        # To do so - use an approach where we reshape the data
        # into `n_blocks` blocks and calculate the mean and then the median on that
        # Why use `median` instead of a global mean?
        #   => when you have large gaps, they will be represented by a large diff
        #      which will skew the mean way more than the median!
        n_blocks = 128
        if x_agg.shape[0] > 5 * n_blocks:
            blck_size = x_diff.shape[0] // n_blocks

            # convert the index series index diff into a reshaped view (i.e., sid_v)
            sid_v: np.ndarray = x_diff[: blck_size * n_blocks].reshape(n_blocks, -1)

            # calculate the mean for each block and then the median of those means
            med_diff = np.median(np.mean(sid_v, axis=1))
        else:
            med_diff = np.median(x_diff)

        return med_diff, x_diff

    def _get_gap_mask(self, x_agg: np.ndarray) -> Optional[np.ndarray]:
        """Get a boolean mask indicating the indices where there are gaps.

        If you require custom gap handling, you can implement this method to return a
        boolean mask indicating the indices where there are gaps.

        Parameters
        ----------
        x_agg: np.ndarray
            The x array. This is used to determine the gaps.

        Returns
        -------
        Optional[np.ndarray]
            A boolean mask indicating the indices where there are gaps. If there are no
            gaps, None is returned.

        """
        med_diff, x_diff = self._calc_med_diff(x_agg)

        # TODO: this 4 was revealed to me in a dream, but it seems to work well
        # After some consideration, we altered this to a 4.1
        gap_mask = x_diff > 4.1 * med_diff
        if not any(gap_mask):
            return
        return gap_mask
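
The block-wise median approximation used in `_calc_med_diff` can be reproduced in isolation; the sketch below mirrors the 128-block heuristic on a standalone diff array:

```python
import numpy as np

def approx_median(x_diff: np.ndarray, n_blocks: int = 128) -> float:
    """Median of per-block means: a fast, gap-robust proxy for the full median."""
    if x_diff.shape[0] <= 5 * n_blocks:
        return float(np.median(x_diff))
    blck_size = x_diff.shape[0] // n_blocks
    blocks = x_diff[: blck_size * n_blocks].reshape(n_blocks, -1)
    return float(np.median(np.mean(blocks, axis=1)))

x = np.cumsum(np.random.exponential(1.0, 100_000))
x_diff = np.diff(x, prepend=x[0])
print(approx_median(x_diff), np.median(x_diff))  # the two values are close here
```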

MinMaxAggregator

Bases: DataPointSelector

Aggregation method which performs binned min-max aggregation over fully overlapping windows.

This is arguably the most computationally efficient downsampling method, as it only performs (non-expensive) comparisons on the data in a single pass.

Details on visual representativeness & stability: https://arxiv.org/abs/2304.00900

Note

This method is rather efficient when scaling to large data sizes and can be used as a data-reduction step before feeding it to the LTTB algorithm, as MinMaxLTTB does with the MinMaxOverlapAggregator.

Source code in plotly_resampler/aggregation/aggregators.py, lines 158-204
class MinMaxAggregator(DataPointSelector):
    """Aggregation method which performs binned min-max aggregation over fully
    overlapping windows.

    This is arguably the most computationally efficient downsampling method, as it only
    performs (non-expensive) comparisons on the data in a single pass.

    Details on visual representativeness & stability: [https://arxiv.org/abs/2304.00900](https://arxiv.org/abs/2304.00900)

    !!! note

        This method is rather efficient when scaling to large data sizes and can be used
        as a data-reduction step before feeding it to the [`LTTB`][aggregation.aggregators.LTTB]
        algorithm, as [`MinMaxLTTB`][aggregation.aggregators.MinMaxLTTB] does with the
        [`MinMaxOverlapAggregator`][aggregation.aggregators.MinMaxOverlapAggregator].

    """

    def __init__(self, nan_policy="omit", **downsample_kwargs):
        """
        Parameters
        ----------
        **downsample_kwargs
            Keyword arguments passed to the :class:`MinMaxDownsampler`.
            - The `parallel` argument is set to False by default.
        nan_policy: str, optional
            The policy to handle NaNs. Can be 'omit' or 'keep'. By default, 'omit'.

        """
        # this downsampler supports all dtypes
        super().__init__(**downsample_kwargs)
        if nan_policy not in ("omit", "keep"):
            raise ValueError("nan_policy must be either 'omit' or 'keep'")
        if nan_policy == "omit":
            self.downsampler = MinMaxDownsampler()
        else:
            self.downsampler = NaNMinMaxDownsampler()

    def _arg_downsample(
        self,
        x: np.ndarray | None,
        y: np.ndarray,
        n_out: int,
    ) -> np.ndarray:
        return self.downsampler.downsample(
            *_to_tsdownsample_args(x, y), n_out=n_out, **self.downsample_kwargs
        )

__init__(nan_policy='omit', **downsample_kwargs)

Parameters:

- **downsample_kwargs (default {}): Keyword arguments passed to the MinMaxDownsampler. The `parallel` argument is set to False by default.
- nan_policy (str, default 'omit'): The policy to handle NaNs. Can be 'omit' or 'keep'.
Source code in plotly_resampler/aggregation/aggregators.py, lines 176-194
def __init__(self, nan_policy="omit", **downsample_kwargs):
    """
    Parameters
    ----------
    **downsample_kwargs
        Keyword arguments passed to the :class:`MinMaxDownsampler`.
        - The `parallel` argument is set to False by default.
    nan_policy: str, optional
        The policy to handle NaNs. Can be 'omit' or 'keep'. By default, 'omit'.

    """
    # this downsampler supports all dtypes
    super().__init__(**downsample_kwargs)
    if nan_policy not in ("omit", "keep"):
        raise ValueError("nan_policy must be either 'omit' or 'keep'")
    if nan_policy == "omit":
        self.downsampler = MinMaxDownsampler()
    else:
        self.downsampler = NaNMinMaxDownsampler()
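
A usage sketch (public `arg_downsample` signature assumed from the PlotlyAggregatorParser usage further below):

```python
import numpy as np

from plotly_resampler.aggregation import MinMaxAggregator  # assumed export location

x = np.arange(2_000_000)
y = np.random.randn(2_000_000)

# 'omit' (the default) uses MinMaxDownsampler; 'keep' uses NaNMinMaxDownsampler.
selector = MinMaxAggregator(nan_policy="omit")
idxs = selector.arg_downsample(x, y, n_out=2_000)
y_agg = y[idxs]
```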

MinMaxLTTB

Bases: DataPointSelector

Efficient version of LTTB by first reducing really large datasets with the MinMaxAggregator and then further aggregating the reduced result with LTTB.

Starting from 10M data points, this method performs the MinMax-prefetching of data points to enhance computational efficiency.

Inventors: Jonas & Jeroen Van Der Donckt - 2022

Paper: https://arxiv.org/pdf/2305.00332.pdf

Source code in plotly_resampler/aggregation/aggregators.py, lines 207-264
class MinMaxLTTB(DataPointSelector):
    """Efficient version off LTTB by first reducing really large datasets with
    the [`MinMaxAggregator`][aggregation.aggregators.MinMaxAggregator] and then further aggregating the
    reduced result with [`LTTB`][aggregation.aggregators.LTTB].

    Starting from 10M data points, this method performs the MinMax-prefetching of data
    points to enhance computational efficiency.

    Inventors: Jonas & Jeroen Van Der Donckt - 2022

    Paper: [https://arxiv.org/pdf/2305.00332.pdf](https://arxiv.org/pdf/2305.00332.pdf)
    """

    def __init__(
        self, minmax_ratio: int = 4, nan_policy: str = "omit", **downsample_kwargs
    ):
        """
        Parameters
        ----------
        minmax_ratio: int, optional
            The ratio between the number of data points in the MinMax-prefetching and
            the number of data points that will be outputted by LTTB. By default, 4.
        nan_policy: str, optional
            The policy to handle NaNs. Can be 'omit' or 'keep'. By default, 'omit'.
        **downsample_kwargs
            Keyword arguments passed to the `MinMaxLTTBDownsampler`.
            - The `parallel` argument is set to False by default.
            - The `minmax_ratio` argument is set to 4 by default, which was empirically
              proven to be a good default.

        """
        if nan_policy not in ("omit", "keep"):
            raise ValueError("nan_policy must be either 'omit' or 'keep'")
        if nan_policy == "omit":
            self.minmaxlttb = MinMaxLTTBDownsampler()
        else:
            self.minmaxlttb = NaNMinMaxLTTBDownsampler()

        self.minmax_ratio = minmax_ratio

        super().__init__(
            y_dtype_regex_list=[rf"{dtype}\d*" for dtype in ("float", "int", "uint")]
            + ["category", "bool"],
            **downsample_kwargs,
        )

    def _arg_downsample(
        self,
        x: np.ndarray | None,
        y: np.ndarray,
        n_out: int,
    ) -> np.ndarray:
        return self.minmaxlttb.downsample(
            *_to_tsdownsample_args(x, y),
            n_out=n_out,
            minmax_ratio=self.minmax_ratio,
            **self.downsample_kwargs,
        )

__init__(minmax_ratio=4, nan_policy='omit', **downsample_kwargs)

Parameters:

- minmax_ratio (int, default 4): The ratio between the number of data points in the MinMax-prefetching and the number of data points that will be outputted by LTTB.
- nan_policy (str, default 'omit'): The policy to handle NaNs. Can be 'omit' or 'keep'.
- **downsample_kwargs (default {}): Keyword arguments passed to the MinMaxLTTBDownsampler. The `parallel` argument is set to False by default; the `minmax_ratio` argument is set to 4 by default, which was empirically proven to be a good default.
Source code in plotly_resampler/aggregation/aggregators.py, lines 220-251
def __init__(
    self, minmax_ratio: int = 4, nan_policy: str = "omit", **downsample_kwargs
):
    """
    Parameters
    ----------
    minmax_ratio: int, optional
        The ratio between the number of data points in the MinMax-prefetching and
        the number of data points that will be outputted by LTTB. By default, 4.
    nan_policy: str, optional
        The policy to handle NaNs. Can be 'omit' or 'keep'. By default, 'omit'.
    **downsample_kwargs
        Keyword arguments passed to the `MinMaxLTTBDownsampler`.
        - The `parallel` argument is set to False by default.
        - The `minmax_ratio` argument is set to 4 by default, which was empirically
          proven to be a good default.

    """
    if nan_policy not in ("omit", "keep"):
        raise ValueError("nan_policy must be either 'omit' or 'keep'")
    if nan_policy == "omit":
        self.minmaxlttb = MinMaxLTTBDownsampler()
    else:
        self.minmaxlttb = NaNMinMaxLTTBDownsampler()

    self.minmax_ratio = minmax_ratio

    super().__init__(
        y_dtype_regex_list=[rf"{dtype}\d*" for dtype in ("float", "int", "uint")]
        + ["category", "bool"],
        **downsample_kwargs,
    )
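
A usage sketch; minmax_ratio controls how many candidate points the MinMax preselection keeps per LTTB output point (signature assumptions as for the other selectors):

```python
import numpy as np

from plotly_resampler.aggregation import MinMaxLTTB  # assumed export location

x = np.arange(10_000_000)
y = np.random.randn(10_000_000).cumsum()

# MinMax preselects ~minmax_ratio * n_out points, LTTB then reduces them to n_out.
selector = MinMaxLTTB(minmax_ratio=4)
idxs = selector.arg_downsample(x, y, n_out=4_000)
x_agg, y_agg = x[idxs], y[idxs]
```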

MinMaxOverlapAggregator

Bases: DataPointSelector

Aggregation method which performs binned min-max aggregation over 50% overlapping windows.

minmax operator image

In the above image, bin_size represents the size of (len(series) / n_out). As the windows have 50% overlap and are consecutive, the min & max values are calculated on windows of size (2x bin_size).

This is very similar to the MinMaxAggregator; empirical results showed no observable difference between the two approaches.

Note

This method is implemented in Python (leveraging numpy for vectorization), but is significantly slower than the MinMaxAggregator (which is implemented in the tsdownsample toolkit in Rust).
As such, this class does not support any downsample kwargs.

Note

This downsampler supports all dtypes.

Source code in plotly_resampler/aggregation/aggregators.py, lines 98-155
class MinMaxOverlapAggregator(DataPointSelector):
    """Aggregation method which performs binned min-max aggregation over 50% overlapping
    windows.

    ![minmax operator image](https://github.com/predict-idlab/plotly-resampler/blob/main/docs/sphinx/_static/minmax_operator.png)

    In the above image, **bin_size** represents the size of *(len(series) / n_out)*.
    As the windows have 50% overlap and are consecutive, the min & max values are
    calculated on windows of size (2x bin_size).

    This is *very* similar to the MinMaxAggregator; empirical results showed no
    observable difference between the two approaches.

    !!! note

        This method is implemented in Python (leveraging numpy for vectorization), but
        is **significantly slower than the MinMaxAggregator** (which is implemented in
        the tsdownsample toolkit in Rust). <br/>
        As such, this class does not support any downsample kwargs.

    !!! note

        This downsampler supports all dtypes.

    """

    def _arg_downsample(
        self,
        x: np.ndarray | None,
        y: np.ndarray,
        n_out: int,
    ) -> np.ndarray:
        # The block size is 2x the bin size; we also perform the ceil-operation
        # to ensure that the block_size * n_out / 2 < len(x)
        block_size = math.ceil(y.shape[0] / (n_out + 1) * 2)
        argmax_offset = block_size // 2

        # Calculate the offset range which will be added to the argmin and argmax pos
        offset = np.arange(
            0, stop=y.shape[0] - block_size - argmax_offset, step=block_size
        )

        # Calculate the argmin & argmax on the reshaped view of `y` &
        # add the corresponding offset
        argmin = (
            y[: block_size * offset.shape[0]].reshape(-1, block_size).argmin(axis=1)
            + offset
        )
        argmax = (
            y[argmax_offset : block_size * offset.shape[0] + argmax_offset]
            .reshape(-1, block_size)
            .argmax(axis=1)
            + offset
            + argmax_offset
        )

        # Sort the argmin & argmax (where we append the first and last index item)
        return np.unique(np.concatenate((argmin, argmax, [0, y.shape[0] - 1])))

NoGapHandler

Bases: AbstractGapHandler

No gap handling.

Source code in plotly_resampler/aggregation/gap_handlers.py, lines 15-19
class NoGapHandler(AbstractGapHandler):
    """No gap handling."""

    def _get_gap_mask(self, x_agg: np.ndarray) -> Optional[np.ndarray]:
        return

PlotlyAggregatorParser

Source code in plotly_resampler/aggregation/plotly_aggregator_parser.py, lines 15-223
class PlotlyAggregatorParser:
    @staticmethod
    def parse_hf_data(
        hf_data: np.ndarray | pd.Categorical | pd.Series | pd.Index,
    ) -> np.ndarray | pd.Categorical:
        """Parse the high-frequency data to a numpy array."""
        # Categorical data (pandas)
        #   - pd.Series with categorical dtype -> calling .values will return a
        #       pd.Categorical
        #   - pd.CategoricalIndex -> calling .values returns a pd.Categorical
        #   - pd.Categorical: has no .values attribute -> will not be parsed
        if isinstance(hf_data, pd.RangeIndex):
            return None
        if isinstance(hf_data, (pd.Series, pd.Index)):
            return hf_data.values
        return hf_data

    @staticmethod
    def to_same_tz(
        ts: Union[pd.Timestamp, None], reference_tz: Union[pytz.BaseTzInfo, None]
    ) -> Union[pd.Timestamp, None]:
        """Adjust `ts` its timezone to the `reference_tz`."""
        if ts is None:
            return None
        elif reference_tz is not None:
            if ts.tz is not None:
                assert ts.tz.__str__() == reference_tz.__str__()
                return ts
            else:  # localize -> time remains the same
                return ts.tz_localize(reference_tz)
        elif reference_tz is None and ts.tz is not None:
            return ts.tz_localize(None)
        return ts

    @staticmethod
    def get_start_end_indices(hf_trace_data, axis_type, start, end) -> Tuple[int, int]:
        """Get the start & end indices of the high-frequency data."""
        # Base case: no hf data, or both start & end are None
        if not len(hf_trace_data["x"]):
            return 0, 0
        elif start is None and end is None:
            return 0, len(hf_trace_data["x"])

        # NOTE: as we use bisect right for the end index, we do not need to add a
        #      small epsilon to the end value
        start = hf_trace_data["x"][0] if start is None else start
        end = hf_trace_data["x"][-1] if end is None else end

        # NOTE: we must verify this before checking if the x is a range-index
        if axis_type == "log":
            start, end = 10**start, 10**end

        # We can compute the start & end indices directly when it is a RangeIndex
        if isinstance(hf_trace_data["x"], pd.RangeIndex):
            x_start = hf_trace_data["x"].start
            x_step = hf_trace_data["x"].step
            start_idx = int(max((start - x_start) // x_step, 0))
            end_idx = int((end - x_start) // x_step)
            return start_idx, end_idx
        # TODO: this can be performed as-well for a fixed frequency range-index w/ freq

        if axis_type == "date":
            start, end = pd.to_datetime(start), pd.to_datetime(end)
            # convert start & end to the same timezone
            if isinstance(hf_trace_data["x"], pd.DatetimeIndex):
                tz = hf_trace_data["x"].tz
                assert start.tz == end.tz
                start = PlotlyAggregatorParser.to_same_tz(start, tz)
                end = PlotlyAggregatorParser.to_same_tz(end, tz)

        # Search the index-positions
        start_idx = bisect.bisect_left(hf_trace_data["x"], start)
        end_idx = bisect.bisect_right(hf_trace_data["x"], end)
        return start_idx, end_idx

    @staticmethod
    def _handle_gaps(
        hf_trace_data: dict,
        hf_x: np.ndarray,
        agg_x: np.ndarray,
        agg_y: np.ndarray,
        indices: np.ndarray,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Handle the gaps in the aggregated data.

        Returns:
            - agg_x: the aggregated x-values
            - agg_y: the aggregated y-values
            - indices: the indices of the hf_data data that were aggregated

        """
        gap_handler: AbstractGapHandler = hf_trace_data["gap_handler"]
        downsampler = hf_trace_data["downsampler"]

        # TODO check for trace mode (markers, lines, etc.) and only perform the
        # gap insertion methodology when the mode is lines.
        # if trace.get("connectgaps") != True and
        if (
            isinstance(gap_handler, NoGapHandler)
            # rangeIndex | datetimeIndex with freq -> equally spaced x; so no gaps
            or isinstance(hf_trace_data["x"], pd.RangeIndex)
            or (
                isinstance(hf_trace_data["x"], pd.DatetimeIndex)
                and hf_trace_data["x"].freq is not None
            )
        ):
            return agg_x, agg_y, indices

        # Interleave the gaps
        # View the data as an int64 when we have a DatetimeIndex
        # We only want to detect gaps, so we only want to compare values.
        agg_x_parsed = PlotlyAggregatorParser.parse_hf_data(agg_x)
        xdt = agg_x_parsed.dtype
        if np.issubdtype(xdt, np.timedelta64) or np.issubdtype(xdt, np.datetime64):
            agg_x_parsed = agg_x_parsed.view("int64")

        agg_y, indices = gap_handler.insert_fill_value_between_gaps(
            agg_x_parsed, agg_y, indices
        )
        if isinstance(downsampler, DataPointSelector):
            agg_x = hf_x[indices]
        elif isinstance(downsampler, DataAggregator):
            # The indices are in this case a repeat
            agg_x = agg_x[indices]

        return agg_x, agg_y, indices

    @staticmethod
    def aggregate(
        hf_trace_data: dict,
        start_idx: int,
        end_idx: int,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Aggregate the data in `hf_trace_data` between `start_idx` and `end_idx`.

        Returns:
            - x: the aggregated x-values
            - y: the aggregated y-values
            - indices: the indices of the hf_data data that were aggregated

            These indices are useful to select the corresponding hf_data from
            non `x` and `y` data (e.g. `text`, `marker_size`, `marker_color`).

        """
        hf_x = hf_trace_data["x"][start_idx:end_idx]
        hf_y = hf_trace_data["y"][start_idx:end_idx]

        # No downsampling needed; we show the raw data as is, but with gap-detection
        if (end_idx - start_idx) <= hf_trace_data["max_n_samples"]:
            indices = np.arange(len(hf_y))  # no downsampling - all values are selected
            if len(indices):
                return PlotlyAggregatorParser._handle_gaps(
                    hf_trace_data, hf_x=hf_x, agg_x=hf_x, agg_y=hf_y, indices=indices
                )
            else:
                return hf_x, hf_y, indices

        downsampler = hf_trace_data["downsampler"]

        hf_x_parsed = PlotlyAggregatorParser.parse_hf_data(hf_x)
        hf_y_parsed = PlotlyAggregatorParser.parse_hf_data(hf_y)

        if isinstance(downsampler, DataPointSelector):
            s_v = hf_y_parsed
            if isinstance(s_v, pd.Categorical):  # pd.Categorical (has no .values)
                s_v = s_v.codes
            indices = downsampler.arg_downsample(
                hf_x_parsed,
                s_v,
                n_out=hf_trace_data["max_n_samples"],
                **hf_trace_data.get("downsampler_kwargs", {}),
            )
            if isinstance(hf_trace_data["x"], pd.RangeIndex):
                # we avoid slicing the default pd.RangeIndex (as this is not an
                # in-memory array) - this proves to be faster than slicing the index.
                agg_x = (
                    start_idx
                    + hf_trace_data["x"].start
                    + indices * hf_trace_data["x"].step
                )
            else:
                agg_x = hf_x[indices]
            agg_y = hf_y[indices]
        elif isinstance(downsampler, DataAggregator):
            agg_x, agg_y = downsampler.aggregate(
                hf_x_parsed,
                hf_y_parsed,
                n_out=hf_trace_data["max_n_samples"],
                **hf_trace_data.get("downsampler_kwargs", {}),
            )
            if isinstance(hf_trace_data["x"], pd.RangeIndex):
                # we avoid slicing the default pd.RangeIndex (as this is not an
                # in-memory array) - this proves to be faster than slicing the index.
                agg_x = (
                    start_idx
                    + hf_trace_data["x"].start
                    + agg_x * hf_trace_data["x"].step
                )
            # The indices are just the range of the aggregated data
            indices = np.arange(len(agg_x))
        else:
            raise ValueError(
                "Invalid downsampler instance, must be either a "
                + f"DataAggregator or a DataPointSelector, got {type(downsampler)}"
            )

        return PlotlyAggregatorParser._handle_gaps(
            hf_trace_data, hf_x=hf_x, agg_x=agg_x, agg_y=agg_y, indices=indices
        )

aggregate(hf_trace_data, start_idx, end_idx) staticmethod

Aggregate the data in hf_trace_data between start_idx and end_idx.

Returns:

- x: the aggregated x-values
- y: the aggregated y-values
- indices: the indices of the hf_data data that were aggregated

These indices are useful to select the corresponding hf_data from non `x` and `y` data (e.g. `text`, `marker_size`, `marker_color`).
Source code in plotly_resampler/aggregation/plotly_aggregator_parser.py, lines 142-223
@staticmethod
def aggregate(
    hf_trace_data: dict,
    start_idx: int,
    end_idx: int,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Aggregate the data in `hf_trace_data` between `start_idx` and `end_idx`.

    Returns:
        - x: the aggregated x-values
        - y: the aggregated y-values
        - indices: the indices of the hf_data data that were aggregated

        These indices are useful to select the corresponding hf_data from
        non `x` and `y` data (e.g. `text`, `marker_size`, `marker_color`).

    """
    hf_x = hf_trace_data["x"][start_idx:end_idx]
    hf_y = hf_trace_data["y"][start_idx:end_idx]

    # No downsampling needed; we show the raw data as is, but with gap-detection
    if (end_idx - start_idx) <= hf_trace_data["max_n_samples"]:
        indices = np.arange(len(hf_y))  # no downsampling - all values are selected
        if len(indices):
            return PlotlyAggregatorParser._handle_gaps(
                hf_trace_data, hf_x=hf_x, agg_x=hf_x, agg_y=hf_y, indices=indices
            )
        else:
            return hf_x, hf_y, indices

    downsampler = hf_trace_data["downsampler"]

    hf_x_parsed = PlotlyAggregatorParser.parse_hf_data(hf_x)
    hf_y_parsed = PlotlyAggregatorParser.parse_hf_data(hf_y)

    if isinstance(downsampler, DataPointSelector):
        s_v = hf_y_parsed
        if isinstance(s_v, pd.Categorical):  # pd.Categorical (has no .values)
            s_v = s_v.codes
        indices = downsampler.arg_downsample(
            hf_x_parsed,
            s_v,
            n_out=hf_trace_data["max_n_samples"],
            **hf_trace_data.get("downsampler_kwargs", {}),
        )
        if isinstance(hf_trace_data["x"], pd.RangeIndex):
            # we avoid slicing the default pd.RangeIndex (as this is not an
            # in-memory array) - this proves to be faster than slicing the index.
            agg_x = (
                start_idx
                + hf_trace_data["x"].start
                + indices * hf_trace_data["x"].step
            )
        else:
            agg_x = hf_x[indices]
        agg_y = hf_y[indices]
    elif isinstance(downsampler, DataAggregator):
        agg_x, agg_y = downsampler.aggregate(
            hf_x_parsed,
            hf_y_parsed,
            n_out=hf_trace_data["max_n_samples"],
            **hf_trace_data.get("downsampler_kwargs", {}),
        )
        if isinstance(hf_trace_data["x"], pd.RangeIndex):
            # we avoid slicing the default pd.RangeIndex (as this is not an
            # in-memory array) - this proves to be faster than slicing the index.
            agg_x = (
                start_idx
                + hf_trace_data["x"].start
                + agg_x * hf_trace_data["x"].step
            )
        # The indices are just the range of the aggregated data
        indices = np.arange(len(agg_x))
    else:
        raise ValueError(
            "Invalid downsampler instance, must be either a "
            + f"DataAggregator or a DataPointSelector, got {type(downsampler)}"
        )

    return PlotlyAggregatorParser._handle_gaps(
        hf_trace_data, hf_x=hf_x, agg_x=agg_x, agg_y=agg_y, indices=indices
    )
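
For reference, a hedged sketch of the `hf_trace_data` dict this parser reads; the keys are taken from the code above (`x`, `y`, `max_n_samples`, `downsampler`, `gap_handler`, and optionally `downsampler_kwargs`), and in normal use plotly-resampler assembles this dict internally:

```python
import numpy as np
import pandas as pd

from plotly_resampler.aggregation import MedDiffGapHandler, MinMaxLTTB  # assumed exports
from plotly_resampler.aggregation.plotly_aggregator_parser import PlotlyAggregatorParser

n = 100_000
hf_trace_data = {
    "x": pd.RangeIndex(n),              # equally spaced -> no gap insertion
    "y": np.random.randn(n).cumsum(),
    "max_n_samples": 1_000,
    "downsampler": MinMaxLTTB(),
    "gap_handler": MedDiffGapHandler(),
}

start_idx, end_idx = PlotlyAggregatorParser.get_start_end_indices(
    hf_trace_data, axis_type="linear", start=None, end=None
)
agg_x, agg_y, indices = PlotlyAggregatorParser.aggregate(hf_trace_data, start_idx, end_idx)
print(agg_x.shape, agg_y.shape, indices.shape)
```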

get_start_end_indices(hf_trace_data, axis_type, start, end) staticmethod

Get the start & end indices of the high-frequency data.

Source code in plotly_resampler/aggregation/plotly_aggregator_parser.py, lines 49-88
@staticmethod
def get_start_end_indices(hf_trace_data, axis_type, start, end) -> Tuple[int, int]:
    """Get the start & end indices of the high-frequency data."""
    # Base case: no hf data, or both start & end are None
    if not len(hf_trace_data["x"]):
        return 0, 0
    elif start is None and end is None:
        return 0, len(hf_trace_data["x"])

    # NOTE: as we use bisect right for the end index, we do not need to add a
    #      small epsilon to the end value
    start = hf_trace_data["x"][0] if start is None else start
    end = hf_trace_data["x"][-1] if end is None else end

    # NOTE: we must verify this before checking if the x is a range-index
    if axis_type == "log":
        start, end = 10**start, 10**end

    # We can compute the start & end indices directly when it is a RangeIndex
    if isinstance(hf_trace_data["x"], pd.RangeIndex):
        x_start = hf_trace_data["x"].start
        x_step = hf_trace_data["x"].step
        start_idx = int(max((start - x_start) // x_step, 0))
        end_idx = int((end - x_start) // x_step)
        return start_idx, end_idx
    # TODO: this can be performed as-well for a fixed frequency range-index w/ freq

    if axis_type == "date":
        start, end = pd.to_datetime(start), pd.to_datetime(end)
        # convert start & end to the same timezone
        if isinstance(hf_trace_data["x"], pd.DatetimeIndex):
            tz = hf_trace_data["x"].tz
            assert start.tz == end.tz
            start = PlotlyAggregatorParser.to_same_tz(start, tz)
            end = PlotlyAggregatorParser.to_same_tz(end, tz)

    # Search the index-positions
    start_idx = bisect.bisect_left(hf_trace_data["x"], start)
    end_idx = bisect.bisect_right(hf_trace_data["x"], end)
    return start_idx, end_idx
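
For a RangeIndex the start and end indices reduce to the integer arithmetic shown above; a small worked example with illustrative values:

```python
import pandas as pd

x = pd.RangeIndex(start=100, stop=200, step=2)  # values 100, 102, ..., 198
start, end = 110, 150                           # the zoomed x-range

start_idx = int(max((start - x.start) // x.step, 0))  # (110 - 100) // 2 = 5
end_idx = int((end - x.start) // x.step)              # (150 - 100) // 2 = 25
print(start_idx, end_idx)  # 5 25
```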

parse_hf_data(hf_data) staticmethod

Parse the high-frequency data to a numpy array.

Source code in plotly_resampler/aggregation/plotly_aggregator_parser.py, lines 16-30
@staticmethod
def parse_hf_data(
    hf_data: np.ndarray | pd.Categorical | pd.Series | pd.Index,
) -> np.ndarray | pd.Categorical:
    """Parse the high-frequency data to a numpy array."""
    # Categorical data (pandas)
    #   - pd.Series with categorical dtype -> calling .values will return a
    #       pd.Categorical
    #   - pd.CategoricalIndex -> calling .values returns a pd.Categorical
    #   - pd.Categorical: has no .values attribute -> will not be parsed
    if isinstance(hf_data, pd.RangeIndex):
        return None
    if isinstance(hf_data, (pd.Series, pd.Index)):
        return hf_data.values
    return hf_data

to_same_tz(ts, reference_tz) staticmethod

Adjust the timezone of ts to the reference_tz.

Source code in plotly_resampler/aggregation/plotly_aggregator_parser.py, lines 32-46
@staticmethod
def to_same_tz(
    ts: Union[pd.Timestamp, None], reference_tz: Union[pytz.BaseTzInfo, None]
) -> Union[pd.Timestamp, None]:
    """Adjust `ts` its timezone to the `reference_tz`."""
    if ts is None:
        return None
    elif reference_tz is not None:
        if ts.tz is not None:
            assert ts.tz.__str__() == reference_tz.__str__()
            return ts
        else:  # localize -> time remains the same
            return ts.tz_localize(reference_tz)
    elif reference_tz is None and ts.tz is not None:
        return ts.tz_localize(None)
    return ts
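
A small usage sketch of the timezone alignment (import path as listed under "Source code in" above):

```python
import pandas as pd
import pytz

from plotly_resampler.aggregation.plotly_aggregator_parser import PlotlyAggregatorParser

tz = pytz.timezone("Europe/Brussels")
naive = pd.Timestamp("2023-01-01 12:00")

# A naive timestamp is localized to the reference tz (the wall time stays the same).
print(PlotlyAggregatorParser.to_same_tz(naive, tz))
# A tz-aware timestamp with no reference tz gets its tz info dropped.
print(PlotlyAggregatorParser.to_same_tz(naive.tz_localize(tz), None))
```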