rapidsai_public_repos/cudf/python/cudf/cudf/core/column_accessor.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
from __future__ import annotations
import itertools
import warnings
from collections import abc
from functools import cached_property, reduce
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Mapping,
Optional,
Tuple,
Union,
)
import numpy as np
import pandas as pd
from packaging.version import Version
from pandas.api.types import is_bool
from typing_extensions import Self
import cudf
from cudf.core import column
if TYPE_CHECKING:
from cudf._typing import Dtype
from cudf.core.column import ColumnBase
class _NestedGetItemDict(dict):
"""A dictionary whose __getitem__ method accesses nested dicts.
This class directly subclasses dict for performance, so there are a number
of gotchas: 1) the only safe accessor for nested elements is
`__getitem__` (all other accessors will fail to perform nested lookups), 2)
nested mappings will not exhibit the same behavior (they will be raw
dictionaries unless explicitly created to be of this class), and 3) to
construct this class you _must_ use `from_zip` to get appropriate treatment
of tuple keys.
"""
@classmethod
def from_zip(cls, data):
"""Create from zip, specialized factory for nesting."""
obj = cls()
for key, value in data:
d = obj
for k in key[:-1]:
d = d.setdefault(k, {})
d[key[-1]] = value
return obj
def __getitem__(self, key):
"""Recursively apply dict.__getitem__ for nested elements."""
# As described in the pandas docs
# https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#advanced-indexing-with-hierarchical-index # noqa: E501
# accessing nested elements of a multiindex must be done using a tuple.
# Lists and other sequences are treated as accessing multiple elements
# at the top level of the index.
if isinstance(key, tuple):
return reduce(dict.__getitem__, key, self)
return super().__getitem__(key)
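# Illustrative REPL-style sketch (not part of the original module) of how
# ``from_zip`` nests tuple keys and how tuple indexing walks that nesting;
# the keys and values here are arbitrary placeholders.
# >>> d = _NestedGetItemDict.from_zip([(("a", "x"), 1), (("a", "y"), 2)])
# >>> d[("a", "x")]
# 1
# >>> d["a"]
# {'x': 1, 'y': 2}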
def _to_flat_dict_inner(d, parents=()):
for k, v in d.items():
if not isinstance(v, d.__class__):
if parents:
k = parents + (k,)
yield (k, v)
else:
yield from _to_flat_dict_inner(d=v, parents=parents + (k,))
def _to_flat_dict(d):
"""
Convert the given nested dictionary to a flat dictionary
with tuple keys.
"""
return {k: v for k, v in _to_flat_dict_inner(d)}
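# Illustrative REPL-style sketch (not part of the original module):
# ``_to_flat_dict`` is the inverse of ``_NestedGetItemDict.from_zip``,
# flattening a nested mapping back into tuple keys.
# >>> _to_flat_dict({"a": {"x": 1, "y": 2}})
# {('a', 'x'): 1, ('a', 'y'): 2}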
class ColumnAccessor(abc.MutableMapping):
"""
Parameters
----------
data : mapping
Mapping of keys to column values.
multiindex : bool, optional
Whether tuple keys represent a hierarchical
index with multiple "levels" (default=False).
level_names : tuple, optional
Tuple containing names for each of the levels.
For a non-hierarchical index, a tuple of size 1
may be passed.
rangeindex : bool, optional
Whether the keys should be returned as a RangeIndex
in `to_pandas_index` (default=False).
label_dtype : Dtype, optional
What dtype should be returned in `to_pandas_index`
(default=None).
"""
_data: "Dict[Any, ColumnBase]"
multiindex: bool
_level_names: Tuple[Any, ...]
def __init__(
self,
data: Union[abc.MutableMapping, ColumnAccessor, None] = None,
multiindex: bool = False,
level_names=None,
rangeindex: bool = False,
label_dtype: Dtype | None = None,
):
self.rangeindex = rangeindex
self.label_dtype = label_dtype
if data is None:
data = {}
# TODO: we should validate the keys of `data`
if isinstance(data, ColumnAccessor):
multiindex = multiindex or data.multiindex
level_names = level_names or data.level_names
self._data = data._data
self.multiindex = multiindex
self._level_names = level_names
self.rangeindex = data.rangeindex
self.label_dtype = data.label_dtype
else:
# This code path is performance-critical for copies and should be
# modified with care.
self._data = {}
if data:
data = dict(data)
# Faster than next(iter(data.values()))
column_length = len(data[next(iter(data))])
for k, v in data.items():
# Much faster to avoid the function call if possible; the
# extra isinstance is negligible if we do have to make a
# column from something else.
if not isinstance(v, column.ColumnBase):
v = column.as_column(v)
if len(v) != column_length:
raise ValueError("All columns must be of equal length")
self._data[k] = v
self.multiindex = multiindex
self._level_names = level_names
@classmethod
def _create_unsafe(
cls,
data: Dict[Any, ColumnBase],
multiindex: bool = False,
level_names=None,
) -> ColumnAccessor:
# create a ColumnAccessor without verifying column
# type or size
obj = cls()
obj._data = data
obj.multiindex = multiindex
obj._level_names = level_names
return obj
def __iter__(self):
return iter(self._data)
def __getitem__(self, key: Any) -> ColumnBase:
return self._data[key]
def __setitem__(self, key: Any, value: Any):
self.set_by_label(key, value)
def __delitem__(self, key: Any):
del self._data[key]
self._clear_cache()
def __len__(self) -> int:
return len(self._data)
def __repr__(self) -> str:
type_info = (
f"{self.__class__.__name__}("
f"multiindex={self.multiindex}, "
f"level_names={self.level_names})"
)
column_info = "\n".join(
[f"{name}: {col.dtype}" for name, col in self.items()]
)
return f"{type_info}\n{column_info}"
@property
def level_names(self) -> Tuple[Any, ...]:
if self._level_names is None or len(self._level_names) == 0:
return tuple((None,) * max(1, self.nlevels))
else:
return self._level_names
@property
def nlevels(self) -> int:
if len(self._data) == 0:
return 0
if not self.multiindex:
return 1
else:
return len(next(iter(self.keys())))
@property
def name(self) -> Any:
return self.level_names[-1]
@property
def nrows(self) -> int:
if len(self._data) == 0:
return 0
else:
return len(next(iter(self.values())))
@cached_property
def names(self) -> Tuple[Any, ...]:
return tuple(self.keys())
@cached_property
def columns(self) -> Tuple[ColumnBase, ...]:
return tuple(self.values())
@cached_property
def _grouped_data(self) -> abc.MutableMapping:
"""
If self.multiindex is True,
return the underlying mapping as a nested mapping.
"""
if self.multiindex:
return _NestedGetItemDict.from_zip(zip(self.names, self.columns))
else:
return self._data
@cached_property
def _column_length(self):
try:
return len(self._data[next(iter(self._data))])
except StopIteration:
return 0
def _clear_cache(self):
cached_properties = ("columns", "names", "_grouped_data")
for attr in cached_properties:
try:
self.__delattr__(attr)
except AttributeError:
pass
# Column length should only be cleared if no data is present.
if len(self._data) == 0 and hasattr(self, "_column_length"):
del self._column_length
def to_pandas_index(self) -> pd.Index:
"""Convert the keys of the ColumnAccessor to a Pandas Index object."""
if self.multiindex and len(self.level_names) > 0:
# Using `from_frame()` instead of `from_tuples`
# prevents coercion of values to a different type
# (e.g., ''->NaT)
with warnings.catch_warnings():
# Specifying `dtype="object"` here and passing that to
# `from_frame` is deprecated in pandas, but we cannot remove
# that without also losing compatibility with other current
# pandas behaviors like the NaT inference above. For now we
# must catch the warnings internally, but we will need to
# remove this when we implement compatibility with pandas 2.0,
# which will remove these compatibility layers.
assert Version(pd.__version__) < Version("2.0.0")
warnings.simplefilter("ignore")
result = pd.MultiIndex.from_frame(
pd.DataFrame(
self.names, columns=self.level_names, dtype="object"
),
)
else:
# Determine if we can return a RangeIndex
if self.rangeindex:
if not self.names:
return pd.RangeIndex(
start=0, stop=0, step=1, name=self.name
)
elif cudf.api.types.infer_dtype(self.names) == "integer":
if len(self.names) == 1:
start = self.names[0]
return pd.RangeIndex(
start=start, stop=start + 1, step=1, name=self.name
)
uniques = np.unique(np.diff(np.array(self.names)))
if len(uniques) == 1 and uniques[0] != 0:
diff = uniques[0]
new_range = range(
self.names[0], self.names[-1] + diff, diff
)
return pd.RangeIndex(new_range, name=self.name)
result = pd.Index(
self.names,
name=self.name,
tupleize_cols=False,
dtype=self.label_dtype,
)
return result
def insert(
self, name: Any, value: Any, loc: int = -1, validate: bool = True
):
"""
Insert column into the ColumnAccessor at the specified location.
Parameters
----------
name : Name corresponding to the new column
value : column-like
loc : int, optional
The location to insert the new value at.
Must be (0 <= loc <= ncols). By default, the column is added
to the end.
Returns
-------
None, this function operates in-place.
"""
name = self._pad_key(name)
ncols = len(self._data)
if loc == -1:
loc = ncols
if not (0 <= loc <= ncols):
raise ValueError(
"insert: loc out of bounds: must be 0 <= loc <= ncols"
)
# TODO: we should move all insert logic here
if name in self._data:
raise ValueError(f"Cannot insert '{name}', already exists")
if loc == len(self._data):
if validate:
value = column.as_column(value)
if len(self._data) > 0:
if len(value) != self._column_length:
raise ValueError("All columns must be of equal length")
else:
self._column_length = len(value)
self._data[name] = value
else:
new_keys = self.names[:loc] + (name,) + self.names[loc:]
new_values = self.columns[:loc] + (value,) + self.columns[loc:]
self._data = self._data.__class__(zip(new_keys, new_values))
self._clear_cache()
def copy(self, deep=False) -> ColumnAccessor:
"""
Make a copy of this ColumnAccessor.
"""
if deep or cudf.get_option("copy_on_write"):
return self.__class__(
{k: v.copy(deep=deep) for k, v in self._data.items()},
multiindex=self.multiindex,
level_names=self.level_names,
)
return self.__class__(
self._data.copy(),
multiindex=self.multiindex,
level_names=self.level_names,
)
def select_by_label(self, key: Any) -> ColumnAccessor:
"""
Return a subset of this column accessor,
composed of the keys specified by `key`.
Parameters
----------
key : slice, list-like, tuple or scalar
Returns
-------
ColumnAccessor
"""
if isinstance(key, slice):
return self._select_by_label_slice(key)
elif pd.api.types.is_list_like(key) and not isinstance(key, tuple):
return self._select_by_label_list_like(key)
else:
if isinstance(key, tuple):
if any(isinstance(k, slice) for k in key):
return self._select_by_label_with_wildcard(key)
return self._select_by_label_grouped(key)
def get_labels_by_index(self, index: Any) -> tuple:
"""Get the labels corresponding to the provided column indices.
Parameters
----------
index : integer, integer slice, boolean mask,
or list-like of integers
The column indexes.
Returns
-------
tuple
"""
if isinstance(index, slice):
start, stop, step = index.indices(len(self._data))
return self.names[start:stop:step]
elif pd.api.types.is_integer(index):
return (self.names[index],)
elif (bn := len(index)) > 0 and all(map(is_bool, index)):
if bn != (n := len(self.names)):
raise IndexError(
f"Boolean mask has wrong length: {bn} not {n}"
)
if isinstance(index, (pd.Series, cudf.Series)):
# Don't allow iloc indexing with series
raise NotImplementedError(
"Cannot use Series object for mask iloc indexing"
)
# TODO: Doesn't handle on-device columns
return tuple(n for n, keep in zip(self.names, index) if keep)
else:
return tuple(self.names[i] for i in index)
def select_by_index(self, index: Any) -> ColumnAccessor:
"""
Return a ColumnAccessor composed of the columns
specified by index.
Parameters
----------
index : integer, integer slice, boolean mask,
or list-like of integers
Returns
-------
ColumnAccessor
"""
keys = self.get_labels_by_index(index)
data = {k: self._data[k] for k in keys}
return self.__class__(
data,
multiindex=self.multiindex,
level_names=self.level_names,
)
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int or str, default -2
First level of index to be swapped.
j : int or str, default -1
Second level of index to be swapped.
Returns
-------
ColumnAccessor
"""
i = _get_level(i, self.nlevels, self.level_names)
j = _get_level(j, self.nlevels, self.level_names)
new_keys = [list(row) for row in self]
new_dict = {}
# swap old keys for i and j
for n, row in enumerate(self.names):
new_keys[n][i], new_keys[n][j] = row[j], row[i]
new_dict.update({row: tuple(new_keys[n])})
new_data = {new_dict[k]: v.copy(deep=True) for k, v in self.items()}
# swap level_names for i and j
new_names = list(self.level_names)
new_names[i], new_names[j] = new_names[j], new_names[i]
return self.__class__(
new_data,
multiindex=True,
level_names=new_names,
)
def set_by_label(self, key: Any, value: Any, validate: bool = True):
"""
Add (or modify) column by name.
Parameters
----------
key
name of the column
value : column-like
The value to insert into the column.
validate : bool
If True, the provided value will be coerced to a column and
validated before setting (Default value = True).
"""
key = self._pad_key(key)
if validate:
value = column.as_column(value)
if len(self._data) > 0:
if len(value) != self._column_length:
raise ValueError("All columns must be of equal length")
else:
self._column_length = len(value)
self._data[key] = value
self._clear_cache()
def _select_by_names(self, names: abc.Sequence) -> Self:
return self.__class__(
{key: self[key] for key in names},
multiindex=self.multiindex,
level_names=self.level_names,
)
def _select_by_label_list_like(self, key: Any) -> ColumnAccessor:
# Might be a generator
key = tuple(key)
# Special-casing for boolean mask
if (bn := len(key)) > 0 and all(map(is_bool, key)):
if bn != (n := len(self.names)):
raise IndexError(
f"Boolean mask has wrong length: {bn} not {n}"
)
data = dict(
item
for item, keep in zip(self._grouped_data.items(), key)
if keep
)
else:
data = {k: self._grouped_data[k] for k in key}
if self.multiindex:
data = _to_flat_dict(data)
return self.__class__(
data,
multiindex=self.multiindex,
level_names=self.level_names,
)
def _select_by_label_grouped(self, key: Any) -> ColumnAccessor:
result = self._grouped_data[key]
if isinstance(result, cudf.core.column.ColumnBase):
return self.__class__({key: result}, multiindex=self.multiindex)
else:
if self.multiindex:
result = _to_flat_dict(result)
if not isinstance(key, tuple):
key = (key,)
return self.__class__(
result,
multiindex=self.nlevels - len(key) > 1,
level_names=self.level_names[len(key) :],
)
def _select_by_label_slice(self, key: slice) -> ColumnAccessor:
start, stop = key.start, key.stop
if key.step is not None:
raise TypeError("Label slicing with step is not supported")
if start is None:
start = self.names[0]
if stop is None:
stop = self.names[-1]
start = self._pad_key(start, slice(None))
stop = self._pad_key(stop, slice(None))
for idx, name in enumerate(self.names):
if _compare_keys(name, start):
start_idx = idx
break
for idx, name in enumerate(reversed(self.names)):
if _compare_keys(name, stop):
stop_idx = len(self.names) - idx
break
keys = self.names[start_idx:stop_idx]
return self.__class__(
{k: self._data[k] for k in keys},
multiindex=self.multiindex,
level_names=self.level_names,
)
def _select_by_label_with_wildcard(self, key: Any) -> ColumnAccessor:
key = self._pad_key(key, slice(None))
return self.__class__(
{k: self._data[k] for k in self._data if _compare_keys(k, key)},
multiindex=self.multiindex,
level_names=self.level_names,
)
def _pad_key(self, key: Any, pad_value="") -> Any:
"""
Pad the provided key to a length equal to the number
of levels.
"""
if not self.multiindex:
return key
if not isinstance(key, tuple):
key = (key,)
return key + (pad_value,) * (self.nlevels - len(key))
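# Illustrative REPL-style sketch (not part of the original module), assuming
# ``ca`` is a hypothetical two-level (multiindex) ColumnAccessor: scalar keys
# are right-padded to the number of levels so they can be compared against
# full tuple keys.
# >>> ca._pad_key("a")
# ('a', '')
# >>> ca._pad_key("a", slice(None))
# ('a', slice(None, None, None))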
def rename_levels(
self, mapper: Union[Mapping[Any, Any], Callable], level: Optional[int]
) -> ColumnAccessor:
"""
Rename the specified levels of the given ColumnAccessor
Parameters
----------
self : ColumnAccessor of a given dataframe
mapper : dict-like or function
Transformations to apply to the column label values depending on
the selected ``level``.
If dict-like, only replace the specified level of the
ColumnAccessor's keys (that match the mapper's keys) with
the mapper's values.
If callable, the function is applied only to the specified level
of the ColumnAccessor's keys.
level : int
For a RangeIndex, the only supported levels are 0 and None.
In case of a MultiColumn, only the column labels in the specified
level of the ColumnAccessor's keys will be transformed.
Returns
-------
A new ColumnAccessor with values in the keys replaced according
to the given mapper and level.
"""
if self.multiindex:
def rename_column(x):
x = list(x)
if isinstance(mapper, Mapping):
x[level] = mapper.get(x[level], x[level])
else:
x[level] = mapper(x[level])
x = tuple(x)
return x
if level is None:
raise NotImplementedError(
"Renaming columns with a MultiIndex and level=None is "
"not supported"
)
new_names = map(rename_column, self.keys())
ca = ColumnAccessor(
dict(zip(new_names, self.values())),
level_names=self.level_names,
multiindex=self.multiindex,
)
else:
if level is None:
level = 0
if level != 0:
raise IndexError(
f"Too many levels: Index has only 1 level, not {level+1}"
)
if isinstance(mapper, Mapping):
new_col_names = [
mapper.get(col_name, col_name) for col_name in self.keys()
]
else:
new_col_names = [mapper(col_name) for col_name in self.keys()]
if len(new_col_names) != len(set(new_col_names)):
raise ValueError("Duplicate column names are not allowed")
ca = ColumnAccessor(
dict(zip(new_col_names, self.values())),
level_names=self.level_names,
multiindex=self.multiindex,
)
return self.__class__(ca)
def droplevel(self, level):
# drop the nth level
if level < 0:
level += self.nlevels
self._data = {
_remove_key_level(key, level): value
for key, value in self._data.items()
}
self._level_names = (
self._level_names[:level] + self._level_names[level + 1 :]
)
if (
len(self._level_names) == 1
): # can't use nlevels, as it depends on multiindex
self.multiindex = False
self._clear_cache()
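# Illustrative REPL-style sketch (not part of the original module, assuming a
# working GPU/cudf installation): building a two-level accessor and selecting
# by the outermost label, which drops that level from the result.
# >>> from cudf.core.column import as_column
# >>> ca = ColumnAccessor(
# ...     {("a", "x"): as_column([1, 2]), ("a", "y"): as_column([3, 4])},
# ...     multiindex=True,
# ... )
# >>> ca.nlevels
# 2
# >>> ca.select_by_label("a").names
# ('x', 'y')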
def _compare_keys(target: Any, key: Any) -> bool:
"""
Compare `key` to `target`.
Return True if each value in `key` == corresponding value in `target`.
If any value in `key` is slice(None), it is considered equal
to the corresponding value in `target`.
"""
if not isinstance(target, tuple):
return target == key
for k1, k2 in itertools.zip_longest(target, key, fillvalue=None):
if k2 == slice(None):
continue
if k1 != k2:
return False
return True
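# Illustrative REPL-style sketch (not part of the original module):
# ``slice(None)`` entries in ``key`` act as wildcards when matched against a
# padded tuple key.
# >>> _compare_keys(("a", "x"), ("a", slice(None)))
# True
# >>> _compare_keys(("a", "x"), ("b", slice(None)))
# False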
def _remove_key_level(key: Any, level: int) -> Any:
"""
Remove a level from key. If only a single level remains,
convert the tuple to a scalar.
"""
result = key[:level] + key[level + 1 :]
if len(result) == 1:
return result[0]
return result
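# Illustrative REPL-style sketch (not part of the original module): dropping
# a level from a tuple key, collapsing to a scalar when one level remains.
# >>> _remove_key_level(("a", "b", "c"), 1)
# ('a', 'c')
# >>> _remove_key_level(("a", "b"), 0)
# 'b'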
def _get_level(x, nlevels, level_names):
"""Get the level index from a level number or name.
If given an integer, this function will handle wraparound for
negative values. If given a string (the level name), this function
will extract the index of that level from `level_names`.
Parameters
----------
x
The level number to validate
nlevels
The total available levels in the MultiIndex
level_names
The names of the levels.
"""
if isinstance(x, int):
if x < 0:
x += nlevels
if x >= nlevels:
raise IndexError(
f"Level {x} out of bounds. Index has {nlevels} levels."
)
return x
else:
x = level_names.index(x)
return x
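# Illustrative REPL-style sketch (not part of the original module): negative
# level numbers wrap around, and level names are resolved via ``level_names``.
# >>> _get_level(-1, 3, ("x", "y", "z"))
# 2
# >>> _get_level("y", 3, ("x", "y", "z"))
# 1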
rapidsai_public_repos/cudf/python/cudf/cudf/core/abc.py
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
"""Common abstract base classes for cudf."""
import pickle
import numpy
import cudf
class Serializable:
"""A serializable object composed of device memory buffers.
This base class defines a standard serialization protocol for objects
encapsulating device memory buffers. Serialization proceeds by copying
device data onto the host, then returning it along with suitable metadata
for reconstruction of the object. Deserialization performs the reverse
process, copying the serialized data from the host to new device buffers.
Subclasses must define the abstract methods :meth:`~.serialize` and
:meth:`~.deserialize`. The former defines the conversion of the object
into a representative collection of metadata and data buffers, while the
latter converts back from that representation into an equivalent object.
"""
def serialize(self):
"""Generate an equivalent serializable representation of an object.
Subclasses must implement this method to define how the attributes of
the object are converted into a serializable representation. A common
solution is to construct a list containing device buffer attributes in
a well-defined order that can be reinterpreted upon deserialization,
then place all other lightweight attributes into the metadata
dictionary.
Returns
-------
Tuple[Dict, List]
The first element of the returned tuple is a dict containing any
serializable metadata required to reconstruct the object. The
second element is a list containing the device data buffers
or memoryviews of the object.
:meta private:
"""
raise NotImplementedError(
"Subclasses of Serializable must implement serialize"
)
@classmethod
def deserialize(cls, header, frames):
"""Generate an object from a serialized representation.
Subclasses must implement this method to define how objects of that
class can be constructed from a serialized representation generalized
by :meth:`serialize`.
Parameters
----------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffers or memoryviews that the object should contain.
Returns
-------
Serializable
A new instance of `cls` (a subclass of `Serializable`) equivalent
to the instance that was serialized to produce the header and
frames.
:meta private:
"""
raise NotImplementedError(
"Subclasses of Serializable must implement deserialize"
)
def device_serialize(self):
"""Serialize data and metadata associated with device memory.
Returns
-------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffer or memoryview objects that the object
should contain.
:meta private:
"""
header, frames = self.serialize()
assert all(
isinstance(f, (cudf.core.buffer.Buffer, memoryview))
for f in frames
)
header["type-serialized"] = pickle.dumps(type(self))
header["is-cuda"] = [
hasattr(f, "__cuda_array_interface__") for f in frames
]
header["lengths"] = [f.nbytes for f in frames]
return header, frames
@classmethod
def device_deserialize(cls, header, frames):
"""Perform device-side deserialization tasks.
The primary purpose of this method is the creation of device memory
buffers from host buffers where necessary.
Parameters
----------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffers or memoryviews that the object should contain.
Returns
-------
Serializable
A new instance of `cls` (a subclass of `Serializable`) equivalent
to the instance that was serialized to produce the header and
frames.
:meta private:
"""
typ = pickle.loads(header["type-serialized"])
frames = [
cudf.core.buffer.as_buffer(f) if c else memoryview(f)
for c, f in zip(header["is-cuda"], frames)
]
return typ.deserialize(header, frames)
def host_serialize(self):
"""Serialize data and metadata associated with host memory.
Returns
-------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffers or memoryviews that the object should contain.
:meta private:
"""
header, frames = self.device_serialize()
header["writeable"] = len(frames) * (None,)
frames = [
f.memoryview() if c else memoryview(f)
for c, f in zip(header["is-cuda"], frames)
]
return header, frames
@classmethod
def host_deserialize(cls, header, frames):
"""Perform host-side deserialization tasks.
Parameters
----------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffers or memoryviews that the object should contain.
Returns
-------
Serializable
A new instance of `cls` (a subclass of `Serializable`) equivalent
to the instance that was serialized to produce the header and
frames.
:meta private:
"""
frames = [
cudf.core.buffer.as_buffer(f) if c else f
for c, f in zip(header["is-cuda"], map(memoryview, frames))
]
obj = cls.device_deserialize(header, frames)
return obj
def __reduce_ex__(self, protocol):
header, frames = self.host_serialize()
# Since memoryviews are not picklable, we convert them to numpy
# arrays (zero-copy). This works seamlessly because host_deserialize
# converts the frames back into memoryviews.
frames = [numpy.asarray(f) for f in frames]
return self.host_deserialize, (header, frames)
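# Illustrative REPL-style sketch (not part of the original module, assuming a
# working GPU/cudf installation): the round-trip contract for a concrete
# Serializable subclass such as cudf.Series.
# >>> import cudf
# >>> s = cudf.Series([1, 2, 3])
# >>> header, frames = s.device_serialize()
# >>> restored = cudf.Series.device_deserialize(header, frames)
# >>> restored.equals(s)
# True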
rapidsai_public_repos/cudf/python/cudf/cudf/core/scalar.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import decimal
import operator
from collections import OrderedDict
import numpy as np
import pyarrow as pa
import cudf
from cudf.api.types import is_datetime64_dtype, is_scalar, is_timedelta64_dtype
from cudf.core.dtypes import ListDtype, StructDtype
from cudf.core.missing import NA, NaT
from cudf.core.mixins import BinaryOperand
from cudf.utils.dtypes import (
get_allowed_combinations_for_operator,
to_cudf_compatible_scalar,
)
# Note that the metaclass below can easily be generalized for use with
# other classes, if needed in the future. Simply replace the arguments
# of the `__call__` method with `*args` and `**kwargs`. This will
# result in additional overhead when constructing the cache key, as
# unpacking *args and **kwargs is not cheap. See the discussion in
# https://github.com/rapidsai/cudf/pull/11246#discussion_r955843532
# for details.
class CachedScalarInstanceMeta(type):
"""
Metaclass for Scalar that caches `maxsize` instances.
After `maxsize` is reached, evicts the least recently used
instances to make room for new values.
"""
def __new__(cls, names, bases, attrs, **kwargs):
return type.__new__(cls, names, bases, attrs)
# choose 128 because that's the default `maxsize` for
# `functools.lru_cache`:
def __init__(self, names, bases, attrs, maxsize=128):
self.__maxsize = maxsize
self.__instances = OrderedDict()
def __call__(self, value, dtype=None):
# the cache key is constructed from the arguments, and also
# the _types_ of the arguments, since objects of different
# types can compare equal
cache_key = (value, type(value), dtype, type(dtype))
try:
# try retrieving an instance from the cache:
self.__instances.move_to_end(cache_key)
return self.__instances[cache_key]
except KeyError:
# if an instance couldn't be found in the cache,
# construct it and add to cache:
obj = super().__call__(value, dtype=dtype)
try:
self.__instances[cache_key] = obj
except TypeError:
# couldn't hash the arguments, don't cache:
return obj
if len(self.__instances) > self.__maxsize:
self.__instances.popitem(last=False)
return obj
except TypeError:
# couldn't hash the arguments, don't cache:
return super().__call__(value, dtype=dtype)
def _clear_instance_cache(self):
self.__instances.clear()
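# Illustrative REPL-style sketch (not part of the original module, assuming a
# working cudf installation): because instances are cached by
# (value, type(value), dtype, type(dtype)), constructing the "same" scalar
# twice returns the cached object.
# >>> import cudf
# >>> cudf.Scalar(1, dtype="int64") is cudf.Scalar(1, dtype="int64")
# True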
class Scalar(BinaryOperand, metaclass=CachedScalarInstanceMeta):
"""
A GPU-backed scalar object with NumPy scalar-like properties.
May be used in binary operations against other scalars, cuDF
Series, DataFrame, and Index objects.
Examples
--------
>>> import cudf
>>> import numpy as np
>>> cudf.Scalar(42, dtype='int64')
Scalar(42, dtype=int64)
>>> cudf.Scalar(42, dtype='int32') + cudf.Scalar(42, dtype='float64')
Scalar(84.0, dtype=float64)
>>> cudf.Scalar(42, dtype='int64') + np.int8(21)
Scalar(63, dtype=int64)
>>> x = cudf.Scalar(42, dtype='datetime64[s]')
>>> y = cudf.Scalar(21, dtype='timedelta64[ns]')
>>> x - y
Scalar(1970-01-01T00:00:41.999999979, dtype=datetime64[ns])
>>> cudf.Series([1,2,3]) + cudf.Scalar(1)
0 2
1 3
2 4
dtype: int64
>>> df = cudf.DataFrame({'a':[1,2,3], 'b':[4.5, 5.5, 6.5]})
>>> slr = cudf.Scalar(10, dtype='uint8')
>>> df - slr
a b
0 -9 -5.5
1 -8 -4.5
2 -7 -3.5
Parameters
----------
value : Python Scalar, NumPy Scalar, or cuDF Scalar
The scalar value to be converted to a GPU backed scalar object
dtype : np.dtype or string specifier
The data type
"""
_VALID_BINARY_OPERATIONS = BinaryOperand._SUPPORTED_BINARY_OPERATIONS
def __init__(self, value, dtype=None):
self._host_value = None
self._host_dtype = None
self._device_value = None
if isinstance(value, Scalar):
if value._is_host_value_current:
self._host_value = value._host_value
self._host_dtype = value._host_dtype
else:
self._device_value = value._device_value
else:
self._host_value, self._host_dtype = self._preprocess_host_value(
value, dtype
)
@classmethod
def from_device_scalar(cls, device_scalar):
if not isinstance(device_scalar, cudf._lib.scalar.DeviceScalar):
raise TypeError(
"Expected an instance of DeviceScalar, "
f"got {type(device_scalar).__name__}"
)
obj = object.__new__(cls)
obj._host_value = None
obj._host_dtype = None
obj._device_value = device_scalar
return obj
@property
def _is_host_value_current(self):
return self._host_value is not None
@property
def _is_device_value_current(self):
return self._device_value is not None
@property
def device_value(self):
if self._device_value is None:
self._device_value = cudf._lib.scalar.DeviceScalar(
self._host_value, self._host_dtype
)
return self._device_value
@property
def value(self):
if not self._is_host_value_current:
self._device_value_to_host()
return self._host_value
# todo: change to cached property
@property
def dtype(self):
if self._is_host_value_current:
if isinstance(self._host_value, str):
return cudf.dtype("object")
else:
return self._host_dtype
else:
return self.device_value.dtype
def is_valid(self):
if not self._is_host_value_current:
self._device_value_to_host()
return not cudf._lib.scalar._is_null_host_scalar(self._host_value)
def _device_value_to_host(self):
self._host_value = self._device_value._to_host_scalar()
def _preprocess_host_value(self, value, dtype):
valid = not cudf._lib.scalar._is_null_host_scalar(value)
if isinstance(value, list):
if dtype is not None:
raise TypeError("Lists may not be cast to a different dtype")
else:
dtype = ListDtype.from_arrow(
pa.infer_type([value], from_pandas=True)
)
return value, dtype
elif isinstance(dtype, ListDtype):
if value not in {None, NA}:
raise ValueError(f"Can not coerce {value} to ListDtype")
else:
return NA, dtype
if isinstance(value, dict):
if dtype is None:
dtype = StructDtype.from_arrow(
pa.infer_type([value], from_pandas=True)
)
return value, dtype
elif isinstance(dtype, StructDtype):
if value not in {None, NA}:
raise ValueError(f"Can not coerce {value} to StructDtype")
else:
return NA, dtype
if isinstance(dtype, cudf.core.dtypes.DecimalDtype):
value = pa.scalar(
value, type=pa.decimal128(dtype.precision, dtype.scale)
).as_py()
if isinstance(value, decimal.Decimal) and dtype is None:
dtype = cudf.Decimal128Dtype._from_decimal(value)
value = to_cudf_compatible_scalar(value, dtype=dtype)
if dtype is None:
if not valid:
if isinstance(value, (np.datetime64, np.timedelta64)):
unit, _ = np.datetime_data(value)
if unit == "generic":
raise TypeError(
"Can't convert generic NaT to null scalar"
)
else:
dtype = value.dtype
else:
raise TypeError(
"dtype required when constructing a null scalar"
)
else:
dtype = value.dtype
if not isinstance(dtype, cudf.core.dtypes.DecimalDtype):
dtype = cudf.dtype(dtype)
if not valid:
value = (
NaT
if is_datetime64_dtype(dtype) or is_timedelta64_dtype(dtype)
else NA
)
return value, dtype
def _sync(self):
"""
If the cache is not synched, copy either the device or host value
to the host or device respectively. If cache is valid, do nothing
"""
if self._is_host_value_current and self._is_device_value_current:
return
elif self._is_host_value_current and not self._is_device_value_current:
self._device_value = cudf._lib.scalar.DeviceScalar(
self._host_value, self._host_dtype
)
elif self._is_device_value_current and not self._is_host_value_current:
self._host_value = self._device_value.value
self._host_dtype = self._host_value.dtype
else:
raise ValueError("Invalid cudf.Scalar")
def __index__(self):
if self.dtype.kind not in {"u", "i"}:
raise TypeError("Only Integer typed scalars may be used in slices")
return int(self)
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __bool__(self):
return bool(self.value)
def __round__(self, n):
return self._binaryop(n, "__round__")
# Scalar Unary Operations
def __abs__(self):
return self._scalar_unaop("__abs__")
def __ceil__(self):
return self._scalar_unaop("__ceil__")
def __floor__(self):
return self._scalar_unaop("__floor__")
def __invert__(self):
return self._scalar_unaop("__invert__")
def __neg__(self):
return self._scalar_unaop("__neg__")
def __repr__(self):
# str() fixes a numpy bug with NaT
# https://github.com/numpy/numpy/issues/17552
return (
f"{self.__class__.__name__}"
f"({str(self.value)}, dtype={self.dtype})"
)
def _binop_result_dtype_or_error(self, other, op):
if op in {"__eq__", "__ne__", "__lt__", "__gt__", "__le__", "__ge__"}:
return np.bool_
out_dtype = get_allowed_combinations_for_operator(
self.dtype, other.dtype, op
)
# datetime handling
if out_dtype in {"M", "m"}:
if self.dtype.char in {"M", "m"} and other.dtype.char not in {
"M",
"m",
}:
return self.dtype
if other.dtype.char in {"M", "m"} and self.dtype.char not in {
"M",
"m",
}:
return other.dtype
else:
if (
op == "__sub__"
and self.dtype.char == other.dtype.char == "M"
):
res, _ = np.datetime_data(max(self.dtype, other.dtype))
return cudf.dtype("m8" + f"[{res}]")
return np.result_type(self.dtype, other.dtype)
return cudf.dtype(out_dtype)
def _binaryop(self, other, op: str):
if is_scalar(other):
other = to_cudf_compatible_scalar(other)
out_dtype = self._binop_result_dtype_or_error(other, op)
valid = self.is_valid() and (
isinstance(other, np.generic) or other.is_valid()
)
if not valid:
return Scalar(None, dtype=out_dtype)
else:
result = self._dispatch_scalar_binop(other, op)
return Scalar(result, dtype=out_dtype)
else:
return NotImplemented
def _dispatch_scalar_binop(self, other, op):
if isinstance(other, Scalar):
rhs = other.value
else:
rhs = other
lhs = self.value
reflect, op = self._check_reflected_op(op)
if reflect:
lhs, rhs = rhs, lhs
try:
return getattr(operator, op)(lhs, rhs)
except AttributeError:
return getattr(lhs, op)(rhs)
def _unaop_result_type_or_error(self, op):
if op == "__neg__" and self.dtype == "bool":
raise TypeError(
"Boolean scalars in cuDF do not support"
" negation, use logical not"
)
if op in {"__ceil__", "__floor__"}:
if self.dtype.char in "bBhHf?":
return cudf.dtype("float32")
else:
return cudf.dtype("float64")
return self.dtype
def _scalar_unaop(self, op):
out_dtype = self._unaop_result_type_or_error(op)
if not self.is_valid():
result = None
else:
result = self._dispatch_scalar_unaop(op)
return Scalar(result, dtype=out_dtype)
def _dispatch_scalar_unaop(self, op):
if op == "__floor__":
return np.floor(self.value)
if op == "__ceil__":
return np.ceil(self.value)
return getattr(self.value, op)()
def astype(self, dtype):
if self.dtype == dtype:
return self
return Scalar(self.value, dtype)
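# Illustrative REPL-style sketch (not part of the original module, assuming a
# working cudf installation): ``astype`` returns a new Scalar unless the
# dtype already matches.
# >>> import cudf
# >>> cudf.Scalar(1, dtype="int64").astype("float64")
# Scalar(1.0, dtype=float64)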
rapidsai_public_repos/cudf/python/cudf/cudf/core/_compat.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import pandas as pd
from packaging import version
PANDAS_VERSION = version.parse(pd.__version__)
PANDAS_GE_133 = PANDAS_VERSION >= version.parse("1.3.3")
PANDAS_GE_134 = PANDAS_VERSION >= version.parse("1.3.4")
PANDAS_LT_140 = PANDAS_VERSION < version.parse("1.4.0")
PANDAS_GE_150 = PANDAS_VERSION >= version.parse("1.5.0")
PANDAS_LT_153 = PANDAS_VERSION < version.parse("1.5.3")
PANDAS_GE_200 = PANDAS_VERSION >= version.parse("2.0.0")
PANDAS_GE_210 = PANDAS_VERSION >= version.parse("2.1.0")
PANDAS_GE_220 = PANDAS_VERSION >= version.parse("2.2.0")
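# Illustrative sketch (not part of the original module): these boolean flags
# are typically imported to gate version-dependent code paths, e.g.
# >>> from cudf.core._compat import PANDAS_GE_200
# >>> if PANDAS_GE_200:
# ...     pass  # pandas>=2.0 code path
# ... else:
# ...     pass  # pre-2.0 fallback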
rapidsai_public_repos/cudf/python/cudf/cudf/core/reshape.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
import itertools
import warnings
from collections import abc
from typing import Dict, Optional
import cupy
import numpy as np
import pandas as pd
import cudf
from cudf._lib.transform import one_hot_encode
from cudf._lib.types import size_type_dtype
from cudf._typing import Dtype
from cudf.api.extensions import no_default
from cudf.core.column import ColumnBase, as_column, column_empty_like
from cudf.core.column.categorical import CategoricalColumn
from cudf.utils.dtypes import min_unsigned_type
_AXIS_MAP = {0: 0, 1: 1, "index": 0, "columns": 1}
def _align_objs(objs, how="outer", sort=None):
"""Align a set of Series or Dataframe objects.
Parameters
----------
objs : list of DataFrame, Series, or Index
how : How to handle indexes on other axis (or axes),
similar to join in concat
sort : Whether to sort the resulting Index
Returns
-------
A list of reindexed and aligned objects
ready for concatenation
"""
# Check if multiindex then check if indexes match. GenericIndex
# returns ndarray tuple of bools requiring additional filter.
# Then check for duplicate index value.
i_objs = iter(objs)
first = next(i_objs)
not_matching_index = any(
not first.index.equals(rest.index) for rest in i_objs
)
if not_matching_index:
if not all(o.index.is_unique for o in objs):
raise ValueError("cannot reindex on an axis with duplicate labels")
index = objs[0].index
name = index.name
final_index = _get_combined_index(
[obj.index for obj in objs], intersect=how == "inner", sort=sort
)
final_index.name = name
return [
obj.reindex(final_index)
if not final_index.equals(obj.index)
else obj
for obj in objs
]
else:
if sort:
if not first.index.is_monotonic_increasing:
final_index = first.index.sort_values()
return [obj.reindex(final_index) for obj in objs]
return objs
def _get_combined_index(indexes, intersect: bool = False, sort=None):
if len(indexes) == 0:
index = cudf.Index([])
elif len(indexes) == 1:
index = indexes[0]
elif intersect:
sort = True
index = indexes[0]
for other in indexes[1:]:
# Don't sort for every intersection,
# let the sorting happen in the end.
index = index.intersection(other, sort=False)
else:
index = indexes[0]
if sort is None:
sort = not isinstance(index, cudf.StringIndex)
for other in indexes[1:]:
index = index.union(other, sort=False)
if sort:
if not index.is_monotonic_increasing:
index = index.sort_values()
return index
def _normalize_series_and_dataframe(objs, axis):
"""Convert any cudf.Series objects in objs to DataFrames in place."""
# Default to naming series by a numerical id if they are not named.
sr_name = 0
for idx, o in enumerate(objs):
if isinstance(o, cudf.Series):
if axis == 1:
name = o.name
if name is None:
name = sr_name
sr_name += 1
else:
name = sr_name
objs[idx] = o.to_frame(name=name)
def concat(objs, axis=0, join="outer", ignore_index=False, sort=None):
"""Concatenate DataFrames, Series, or Indices row-wise.
Parameters
----------
objs : list of DataFrame, Series, or Index
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
Set True to ignore the index of the *objs* and provide a
default range index instead.
sort : bool, default False
Sort non-concatenation axis if it is not already aligned.
Returns
-------
A new object of like type with rows from each object in ``objs``.
Examples
--------
Combine two ``Series``.
>>> import cudf
>>> s1 = cudf.Series(['a', 'b'])
>>> s2 = cudf.Series(['c', 'd'])
>>> s1
0 a
1 b
dtype: object
>>> s2
0 c
1 d
dtype: object
>>> cudf.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the
result by setting the ``ignore_index`` option to ``True``.
>>> cudf.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Combine two DataFrame objects with identical columns.
>>> df1 = cudf.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = cudf.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> cudf.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine DataFrame objects with overlapping columns and return
everything. Columns outside the intersection will
be filled with ``null`` values.
>>> df3 = cudf.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> cudf.concat([df1, df3], sort=False)
letter number animal
0 a 1 <NA>
1 b 2 <NA>
0 c 3 cat
1 d 4 dog
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> cudf.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` objects horizontally along the
x axis by passing in ``axis=1``.
>>> df4 = cudf.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
>>> df4
animal name
0 bird polly
1 monkey george
>>> cudf.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
"""
# TODO: Do we really need to have different error messages for an empty
# list and a list of None?
if not objs:
raise ValueError("No objects to concatenate")
objs = [obj for obj in objs if obj is not None]
if not objs:
raise ValueError("All objects passed were None")
axis = _AXIS_MAP.get(axis, None)
if axis is None:
raise ValueError(
f'`axis` must be 0 / "index" or 1 / "columns", got: {axis}'
)
# Return for single object
if len(objs) == 1:
obj = objs[0]
if ignore_index:
if axis == 1:
result = cudf.DataFrame._from_data(
data=obj._data.copy(deep=True),
index=obj.index.copy(deep=True),
)
# The DataFrame constructor for dict-like data (such as the
# ColumnAccessor given by obj._data here) will drop any columns
# in the data that are not in `columns`, so we have to rename
# after construction.
result.columns = pd.RangeIndex(len(obj._data.names))
else:
if isinstance(obj, cudf.Series):
result = cudf.Series._from_data(
data=obj._data.copy(deep=True),
index=cudf.RangeIndex(len(obj)),
)
elif isinstance(obj, pd.Series):
result = cudf.Series(
data=obj,
index=cudf.RangeIndex(len(obj)),
)
else:
result = cudf.DataFrame._from_data(
data=obj._data.copy(deep=True),
index=cudf.RangeIndex(len(obj)),
)
else:
if axis == 0:
result = obj.copy()
else:
data = obj._data.copy(deep=True)
if isinstance(obj, cudf.Series) and obj.name is None:
# If the Series has no name, pandas renames it to 0.
data[0] = data.pop(None)
result = cudf.DataFrame._from_data(
data, index=obj.index.copy(deep=True)
)
if isinstance(result, cudf.Series) and axis == 0:
# sort has no effect for series concatenated along axis 0
return result
else:
return result.sort_index(axis=(1 - axis)) if sort else result
# Retrieve the base types of `objs`. In order to support sub-types
# and object wrappers, we use `isinstance()` instead of comparing
# types directly
typs = set()
for o in objs:
if isinstance(o, cudf.MultiIndex):
typs.add(cudf.MultiIndex)
elif isinstance(o, cudf.BaseIndex):
typs.add(type(o))
elif isinstance(o, cudf.DataFrame):
typs.add(cudf.DataFrame)
elif isinstance(o, cudf.Series):
typs.add(cudf.Series)
else:
raise TypeError(f"cannot concatenate object of type {type(o)}")
allowed_typs = {cudf.Series, cudf.DataFrame}
# when axis is 1 (column) we can concat with Series and Dataframes
if axis == 1:
if not typs.issubset(allowed_typs):
raise TypeError(
"Can only concatenate Series and DataFrame objects when axis=1"
)
df = cudf.DataFrame()
_normalize_series_and_dataframe(objs, axis=axis)
# Inner joins involving empty data frames always return empty dfs, but
# we must delay returning until we have set the column names.
empty_inner = any(obj.empty for obj in objs) and join == "inner"
objs = [obj for obj in objs if obj.shape != (0, 0)]
if len(objs) == 0:
return df
# Don't need to align indices of all `objs` since we
# would anyway return an empty dataframe below
if not empty_inner:
objs = _align_objs(objs, how=join, sort=sort)
df.index = objs[0].index
for o in objs:
for name, col in o._data.items():
if name in df._data:
raise NotImplementedError(
f"A Column with duplicate name found: {name}, cuDF "
f"doesn't support having multiple columns with "
f"the same name yet."
)
if empty_inner:
# if join is inner and it contains an empty df
# we return an empty df, hence creating an empty
# column with dtype metadata retained.
df[name] = cudf.core.column.column_empty_like(
col, newsize=0
)
else:
df[name] = col
result_columns = (
objs[0]
._data.to_pandas_index()
.append([obj._data.to_pandas_index() for obj in objs[1:]])
)
if ignore_index:
# with ignore_index the column names change to numbers
df.columns = pd.RangeIndex(len(result_columns.unique()))
else:
df.columns = result_columns.unique()
if empty_inner:
# if join is inner and it contains an empty df
# we return an empty df
return df.head(0)
return df
# If we get here, we are always concatenating along axis 0 (the rows).
typ = list(typs)[0]
if len(typs) > 1:
if allowed_typs == typs:
# This block of code will run when `objs` has
# both Series & DataFrame kind of inputs.
_normalize_series_and_dataframe(objs, axis=axis)
typ = cudf.DataFrame
else:
raise TypeError(
f"`concat` cannot concatenate objects of "
f"types: {sorted([t.__name__ for t in typs])}."
)
if typ is cudf.DataFrame:
old_objs = objs
objs = [obj for obj in objs if obj.shape != (0, 0)]
if len(objs) == 0:
# If objs is empty, that indicates all of
# objs are empty dataframes.
return cudf.DataFrame()
elif len(objs) == 1:
obj = objs[0]
result = cudf.DataFrame._from_data(
data=None if join == "inner" else obj._data.copy(deep=True),
index=cudf.RangeIndex(len(obj))
if ignore_index
else obj.index.copy(deep=True),
)
return result
else:
if join == "inner" and len(old_objs) != len(objs):
# don't filter out empty df's
objs = old_objs
result = cudf.DataFrame._concat(
objs,
axis=axis,
join=join,
ignore_index=ignore_index,
# Explicitly cast rather than relying on None being falsy.
sort=bool(sort),
)
return result
elif typ is cudf.Series:
objs = [obj for obj in objs if len(obj)]
if len(objs) == 0:
return cudf.Series()
elif len(objs) == 1 and not ignore_index:
return objs[0]
else:
return cudf.Series._concat(
objs, axis=axis, index=None if ignore_index else True
)
elif typ is cudf.MultiIndex:
return cudf.MultiIndex._concat(objs)
elif issubclass(typ, cudf.Index):
return cudf.core.index.GenericIndex._concat(objs)
else:
raise TypeError(f"cannot concatenate object of type {typ}")
def melt(
frame,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
):
"""Unpivots a DataFrame from wide format to long format,
optionally leaving identifier variables set.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
default: None
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot.
default: all columns that are not set as `id_vars`.
var_name : scalar
Name to use for the `variable` column.
default: frame.columns.name or 'variable'
value_name : str
Name to use for the `value` column.
default: 'value'
Returns
-------
out : DataFrame
Melted result
Difference from pandas:
* Does not support 'col_level' because cuDF does not have multi-index
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'A': ['a', 'b', 'c'],
... 'B': [1, 3, 5],
... 'C': [2, 4, 6]})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> cudf.melt(df, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> cudf.melt(df, id_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> cudf.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
"""
if col_level is not None:
raise NotImplementedError("col_level != None is not supported yet.")
# Arg cleaning
# id_vars
if id_vars is not None:
if not isinstance(id_vars, abc.Sequence):
id_vars = [id_vars]
id_vars = list(id_vars)
missing = set(id_vars) - set(frame._column_names)
if not len(missing) == 0:
raise KeyError(
f"The following 'id_vars' are not present"
f" in the DataFrame: {list(missing)}"
)
else:
id_vars = []
# value_vars
if value_vars is not None:
if not isinstance(value_vars, abc.Sequence):
value_vars = [value_vars]
value_vars = list(value_vars)
missing = set(value_vars) - set(frame._column_names)
if not len(missing) == 0:
raise KeyError(
f"The following 'value_vars' are not present"
f" in the DataFrame: {list(missing)}"
)
else:
# then all remaining columns in frame
unique_id = set(id_vars)
value_vars = [c for c in frame._column_names if c not in unique_id]
# Error for unimplemented support for datatype
dtypes = [frame[col].dtype for col in id_vars + value_vars]
if any(cudf.api.types.is_categorical_dtype(t) for t in dtypes):
raise NotImplementedError(
"Categorical columns are not yet supported for this function"
)
# Check dtype homogeneity in value_var
# Because heterogeneous concat is unimplemented
dtypes = [frame[col].dtype for col in value_vars]
if len(dtypes) > 0:
dtype = dtypes[0]
if any(t != dtype for t in dtypes):
raise ValueError("all cols in value_vars must have the same dtype")
# overlap
overlap = set(id_vars).intersection(set(value_vars))
if not len(overlap) == 0:
raise KeyError(
f"'value_vars' and 'id_vars' cannot have overlap."
f" The following 'value_vars' are ALSO present"
f" in 'id_vars': {list(overlap)}"
)
N = len(frame)
K = len(value_vars)
def _tile(A, reps):
series_list = [A] * reps
if reps > 0:
return cudf.Series._concat(objs=series_list, index=None)
else:
return cudf.Series([], dtype=A.dtype)
# Step 1: tile id_vars
mdata = {col: _tile(frame[col], K) for col in id_vars}
# Step 2: add variable
nval = len(value_vars)
dtype = min_unsigned_type(nval)
temp = cudf.Series(cupy.repeat(cupy.arange(nval, dtype=dtype), N))
if not var_name:
var_name = "variable"
mdata[var_name] = cudf.Series(
cudf.core.column.build_categorical_column(
categories=value_vars,
codes=temp._column,
mask=temp._column.base_mask,
size=temp._column.size,
offset=temp._column.offset,
ordered=False,
)
)
# Step 3: add values
mdata[value_name] = cudf.Series._concat(
objs=[frame[val] for val in value_vars], index=None
)
return cudf.DataFrame(mdata)
def get_dummies(
df,
prefix=None,
prefix_sep="_",
dummy_na=False,
columns=None,
cats=None,
sparse=False,
drop_first=False,
dtype=no_default,
):
"""Returns a dataframe whose columns are the one hot encodings of all
columns in `df`
Parameters
----------
df : array-like, Series, or DataFrame
Data of which to get dummy indicators.
prefix : str, dict, or sequence, optional
Prefix to append. Either a str (to apply a constant prefix), dict
mapping column names to prefixes, or sequence of prefixes to apply with
the same length as the number of columns. If not supplied, defaults
to the empty string
prefix_sep : str, dict, or sequence, optional, default '_'
Separator to use when appending prefixes
dummy_na : boolean, optional
Add a column to indicate Nones; if False, Nones are ignored.
cats : dict, optional
Dictionary mapping column names to sequences of values representing
that column's category. If not supplied, it is computed as the unique
values of the column.
sparse : boolean, optional
Right now this is a non-functional argument in RAPIDS.
drop_first : boolean, optional
Right now this is a non-functional argument in RAPIDS.
columns : sequence of str, optional
Names of columns to encode. If not provided, will attempt to encode all
columns. Note this is different from pandas default behavior, which
encodes all columns with dtype object or categorical
dtype : str, optional
Output dtype, default 'uint8'
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"a": ["value1", "value2", None], "b": [0, 0, 0]})
>>> cudf.get_dummies(df)
b a_value1 a_value2
0 0 1 0
1 0 0 1
2 0 0 0
>>> cudf.get_dummies(df, dummy_na=True)
b a_None a_value1 a_value2
0 0 0 1 0
1 0 0 0 1
2 0 1 0 0
>>> import numpy as np
>>> df = cudf.DataFrame({"a":cudf.Series([1, 2, np.nan, None],
... nan_as_null=False)})
>>> df
a
0 1.0
1 2.0
2 NaN
3 <NA>
>>> cudf.get_dummies(df, dummy_na=True, columns=["a"])
a_1.0 a_2.0 a_nan a_null
0 1 0 0 0
1 0 1 0 0
2 0 0 1 0
3 0 0 0 1
>>> series = cudf.Series([1, 2, None, 2, 4])
>>> series
0 1
1 2
2 <NA>
3 2
4 4
dtype: int64
>>> cudf.get_dummies(series, dummy_na=True)
null 1 2 4
0 0 1 0 0
1 0 0 1 0
2 1 0 0 0
3 0 0 1 0
4 0 0 0 1
"""
if cats is None:
cats = {}
if sparse:
raise NotImplementedError("sparse is not supported yet")
if drop_first:
raise NotImplementedError("drop_first is not supported yet")
if dtype is no_default:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"Default `dtype` value will be changed to 'bool' in a future "
"release, please update `dtype='bool'` to adapt for "
"future behavior.",
FutureWarning,
)
dtype = "uint8"
if isinstance(df, cudf.DataFrame):
encode_fallback_dtypes = ["object", "category"]
if columns is None or len(columns) == 0:
columns = df.select_dtypes(
include=encode_fallback_dtypes
)._column_names
_length_check_params(prefix, columns, "prefix")
_length_check_params(prefix_sep, columns, "prefix_sep")
if prefix is None:
prefix = columns
if isinstance(prefix, str):
prefix_map = {}
elif isinstance(prefix, dict):
prefix_map = prefix
else:
prefix_map = dict(zip(columns, prefix))
if isinstance(prefix_sep, str):
prefix_sep_map = {}
elif isinstance(prefix_sep, dict):
prefix_sep_map = prefix_sep
else:
prefix_sep_map = dict(zip(columns, prefix_sep))
# If we have no columns to encode, we need to drop
# fallback columns (if any)
if len(columns) == 0:
return df.select_dtypes(exclude=encode_fallback_dtypes)
else:
result_data = {
col_name: col
for col_name, col in df._data.items()
if col_name not in columns
}
for name in columns:
if name not in cats:
unique = _get_unique(
column=df._data[name], dummy_na=dummy_na
)
else:
unique = as_column(cats[name])
col_enc_data = _one_hot_encode_column(
column=df._data[name],
categories=unique,
prefix=prefix_map.get(name, prefix),
prefix_sep=prefix_sep_map.get(name, prefix_sep),
dtype=dtype,
)
result_data.update(col_enc_data)
return cudf.DataFrame._from_data(result_data, index=df._index)
else:
ser = cudf.Series(df)
unique = _get_unique(column=ser._column, dummy_na=dummy_na)
data = _one_hot_encode_column(
column=ser._column,
categories=unique,
prefix=prefix,
prefix_sep=prefix_sep,
dtype=dtype,
)
return cudf.DataFrame._from_data(data, index=ser._index)
def _merge_sorted(
objs,
keys=None,
by_index=False,
ignore_index=False,
ascending=True,
na_position="last",
):
"""Merge a list of sorted DataFrame or Series objects.
Dataframes/Series in objs list MUST be pre-sorted by columns
listed in `keys`, or by the index (if `by_index=True`).
Parameters
----------
objs : list of DataFrame or Series
keys : list, default None
List of Column names to sort by. If None, all columns used
(Ignored if `by_index=True`)
by_index : bool, default False
Use index for sorting. `keys` input will be ignored if True
ignore_index : bool, default False
Drop and ignore index during merge. Default range index will
be used in the output dataframe.
ascending : bool, default True
Sorting is in ascending order, otherwise it is descending
na_position : {'first', 'last'}, default 'last'
'first' nulls at the beginning, 'last' nulls at the end
Returns
-------
A new, lexicographically sorted, DataFrame/Series.
"""
if not pd.api.types.is_list_like(objs):
raise TypeError("objs must be a list-like of Frame-like objects")
if len(objs) < 1:
raise ValueError("objs must be non-empty")
if not all(isinstance(table, cudf.core.frame.Frame) for table in objs):
raise TypeError("Elements of objs must be Frame-like")
if len(objs) == 1:
return objs[0]
if by_index and ignore_index:
raise ValueError("`by_index` and `ignore_index` cannot both be True")
if by_index:
key_columns_indices = list(range(0, objs[0]._index.nlevels))
else:
if keys is None:
key_columns_indices = list(range(0, objs[0]._num_columns))
else:
key_columns_indices = [
objs[0]._column_names.index(key) for key in keys
]
if not ignore_index:
key_columns_indices = [
idx + objs[0]._index.nlevels for idx in key_columns_indices
]
columns = [
[
*(obj._index._data.columns if not ignore_index else ()),
*obj._columns,
]
for obj in objs
]
return objs[0]._from_columns_like_self(
cudf._lib.merge.merge_sorted(
input_columns=columns,
key_columns_indices=key_columns_indices,
ascending=ascending,
na_position=na_position,
),
column_names=objs[0]._column_names,
index_names=None if ignore_index else objs[0]._index_names,
)
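# Illustrative REPL-style sketch (not part of the original module, assuming a
# working GPU/cudf installation): merging two frames that are each
# pre-sorted on column "a".
# >>> import cudf
# >>> left = cudf.DataFrame({"a": [1, 3]})
# >>> right = cudf.DataFrame({"a": [2, 4]})
# >>> merged = _merge_sorted([left, right], keys=["a"])
# >>> merged["a"].to_pandas().tolist()
# [1, 2, 3, 4]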
def _pivot(df, index, columns):
"""
Reorganize the values of the DataFrame according to the given
index and columns.
Parameters
----------
df : DataFrame
index : cudf.Index
Index labels of the result
columns : cudf.Index
Column labels of the result
"""
columns_labels, columns_idx = columns._encode()
index_labels, index_idx = index._encode()
column_labels = columns_labels.to_pandas().to_flat_index()
# the result of pivot always has a multicolumn
result = cudf.core.column_accessor.ColumnAccessor(
multiindex=True, level_names=(None,) + columns._data.names
)
def as_tuple(x):
return x if isinstance(x, tuple) else (x,)
for v in df:
names = [as_tuple(v) + as_tuple(name) for name in column_labels]
nrows = len(index_labels)
ncols = len(names)
num_elements = nrows * ncols
if num_elements > 0:
col = df._data[v]
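            # Each value scatters into a single flat column of length
            # nrows * ncols: columns_idx picks which block of `nrows`
            # elements (i.e. which output column) the value lands in and
            # index_idx picks the position within that block. The flat
            # column is split back into per-column frames further below.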
scatter_map = (columns_idx * np.int32(nrows)) + index_idx
target = cudf.DataFrame._from_data(
{
None: cudf.core.column.column_empty_like(
col, masked=True, newsize=nrows * ncols
)
}
)
target._data[None][scatter_map] = col
result_frames = target._split(range(nrows, nrows * ncols, nrows))
result.update(
{
name: next(iter(f._columns))
for name, f in zip(names, result_frames)
}
)
return cudf.DataFrame._from_data(
result, index=cudf.Index(index_labels, name=index.name)
)
def pivot(data, index=None, columns=None, values=None):
"""
Return reshaped DataFrame organized by the given index and column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame.
Parameters
----------
index : column name, optional
Column used to construct the index of the result.
columns : column name, optional
Column used to construct the columns of the result.
values : column name or list of column names, optional
Column(s) whose values are rearranged to produce the result.
If not specified, all remaining columns of the DataFrame
are used.
Returns
-------
DataFrame
Examples
--------
>>> a = cudf.DataFrame()
>>> a['a'] = [1, 1, 2, 2]
>>> a['b'] = ['a', 'b', 'a', 'b']
>>> a['c'] = [1, 2, 3, 4]
>>> a.pivot(index='a', columns='b')
c
b a b
a
1 1 2
2 3 4
Pivot with missing values in result:
>>> a = cudf.DataFrame()
>>> a['a'] = [1, 1, 2]
>>> a['b'] = [1, 2, 3]
>>> a['c'] = ['one', 'two', 'three']
>>> a.pivot(index='a', columns='b')
c
b 1 2 3
a
1 one two <NA>
2 <NA> <NA> three
"""
df = data
values_is_list = True
if values is None:
values = df._columns_view(
col for col in df._column_names if col not in (index, columns)
)
else:
if not isinstance(values, (list, tuple)):
values = [values]
values_is_list = False
values = df._columns_view(values)
if index is None:
index = df.index
else:
index = cudf.core.index.Index(df.loc[:, index])
columns = cudf.Index(df.loc[:, columns])
# Create a DataFrame composed of columns from both
# columns and index
columns_index = {
i: col
for i, col in enumerate(
itertools.chain(index._data.columns, columns._data.columns)
)
}
columns_index = cudf.DataFrame(columns_index)
# Check that each row is unique:
if len(columns_index) != len(columns_index.drop_duplicates()):
raise ValueError("Duplicate index-column pairs found. Cannot reshape.")
result = _pivot(values, index, columns)
# MultiIndex to Index
if not values_is_list:
result._data.droplevel(0)
return result
def unstack(df, level, fill_value=None):
"""
Pivot one or more levels of the (necessarily hierarchical) index labels.
Pivots the specified levels of the index labels of df to the innermost
levels of the columns labels of the result.
    * If the index of ``df`` has multiple levels, returns a ``DataFrame`` with
      the specified levels of the index pivoted to the column levels.
    * If the index of ``df`` has a single level, returns a ``Series`` with all
      column levels pivoted to the index levels.
Parameters
----------
df : DataFrame
level : level name or index, list-like
Integer, name or list of such, specifying one or more
levels of the index to pivot
fill_value
Non-functional argument provided for compatibility with Pandas.
Returns
-------
Series or DataFrame
Examples
--------
>>> df = cudf.DataFrame()
>>> df['a'] = [1, 1, 1, 2, 2]
>>> df['b'] = [1, 2, 3, 1, 2]
>>> df['c'] = [5, 6, 7, 8, 9]
>>> df['d'] = ['a', 'b', 'a', 'd', 'e']
>>> df = df.set_index(['a', 'b', 'd'])
>>> df
c
a b d
1 1 a 5
2 b 6
3 a 7
2 1 d 8
2 e 9
Unstacking level 'a':
>>> df.unstack('a')
c
a 1 2
b d
1 a 5 <NA>
d <NA> 8
2 b 6 <NA>
e <NA> 9
3 a 7 <NA>
Unstacking level 'd' :
>>> df.unstack('d')
c
d a b d e
a b
1 1 5 <NA> <NA> <NA>
2 <NA> 6 <NA> <NA>
3 7 <NA> <NA> <NA>
2 1 <NA> <NA> 8 <NA>
2 <NA> <NA> <NA> 9
Unstacking multiple levels:
>>> df.unstack(['b', 'd'])
c
b 1 2 3
d a d b e a
a
1 5 <NA> 6 <NA> 7
2 <NA> 8 <NA> 9 <NA>
Unstacking single level index dataframe:
>>> df = cudf.DataFrame({('c', 1): [1, 2, 3], ('c', 2):[9, 8, 7]})
>>> df.unstack()
c 1 0 1
1 2
2 3
2 0 9
1 8
2 7
dtype: int64
"""
if not isinstance(df, cudf.DataFrame):
raise ValueError("`df` should be a cudf Dataframe object.")
if df.empty:
raise ValueError("Cannot unstack an empty dataframe.")
if fill_value is not None:
raise NotImplementedError("fill_value is not supported.")
if pd.api.types.is_list_like(level):
if not level:
return df
df = df.copy(deep=False)
if not isinstance(df.index, cudf.MultiIndex):
dtype = df._columns[0].dtype
for col in df._columns:
if not col.dtype == dtype:
raise ValueError(
"Calling unstack() on single index dataframe"
" with different column datatype is not supported."
)
res = df.T.stack(dropna=False)
# Result's index is a multiindex
res.index.names = (
tuple(df._data.to_pandas_index().names) + df.index.names
)
return res
else:
columns = df.index._poplevels(level)
index = df.index
result = _pivot(df, index, columns)
if result.index.nlevels == 1:
result.index = result.index.get_level_values(result.index.names[0])
return result
def _get_unique(column, dummy_na):
"""
    Return the unique values in a column. If ``dummy_na`` is False,
    NaNs and nulls are also dropped.
"""
if isinstance(column, cudf.core.column.CategoricalColumn):
unique = column.categories
else:
unique = column.unique().sort_values()
if not dummy_na:
if np.issubdtype(unique.dtype, np.floating):
unique = unique.nans_to_nulls()
unique = unique.dropna()
return unique
def _one_hot_encode_column(
column: ColumnBase,
categories: ColumnBase,
prefix: Optional[str],
prefix_sep: Optional[str],
dtype: Optional[Dtype],
) -> Dict[str, ColumnBase]:
"""Encode a single column with one hot encoding. The return dictionary
contains pairs of (category, encodings). The keys may be prefixed with
`prefix`, separated with category name with `prefix_sep`. The encoding
columns maybe coerced into `dtype`.
"""
if isinstance(column, CategoricalColumn):
if column.size == column.null_count:
column = column_empty_like(categories, newsize=column.size)
else:
column = column._get_decategorized_column()
if column.size * categories.size >= np.iinfo(size_type_dtype).max:
raise ValueError(
"Size limitation exceeded: column.size * category.size < "
f"np.iinfo({size_type_dtype}).max. Consider reducing "
"size of category"
)
data = one_hot_encode(column, categories)
if prefix is not None and prefix_sep is not None:
data = {f"{prefix}{prefix_sep}{col}": enc for col, enc in data.items()}
if dtype:
data = {k: v.astype(dtype) for k, v in data.items()}
return data
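# Illustrative sketch only (hypothetical helper, never called at import time):
# drives _get_unique and _one_hot_encode_column the same way the encoding
# path above does, assuming a plain string column.
def _example_one_hot_encode_column():  # pragma: no cover
    col = as_column(["a", "b", "a"])
    cats = _get_unique(column=col, dummy_na=False)
    # Keys come back as "letter_<category>"; the boolean encodings are cast
    # to uint8 via `dtype`.
    return _one_hot_encode_column(
        column=col,
        categories=cats,
        prefix="letter",
        prefix_sep="_",
        dtype="uint8",
    )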
def _length_check_params(obj, columns, name):
if cudf.api.types.is_list_like(obj):
if len(obj) != len(columns):
raise ValueError(
f"Length of '{name}' ({len(obj)}) did not match the "
f"length of the columns being "
f"encoded ({len(columns)})."
)
def _get_pivot_names(arrs, names, prefix):
"""
Generates unique names for rows/columns
"""
if names is None:
names = []
for i, arr in enumerate(arrs):
if isinstance(arr, cudf.Series) and arr.name is not None:
names.append(arr.name)
else:
names.append(f"{prefix}_{i}")
else:
if len(names) != len(arrs):
raise ValueError("arrays and names must have the same length")
if not isinstance(names, list):
names = list(names)
return names
def crosstab(
index,
columns,
values=None,
rownames=None,
colnames=None,
aggfunc=None,
margins=False,
margins_name="All",
dropna=None,
normalize=False,
):
"""
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : list of str, default None
If passed, must match number of row arrays passed.
colnames : list of str, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : Not supported
margins_name : Not supported
dropna : Not supported
normalize : Not supported
Returns
-------
DataFrame
Cross tabulation of the data.
Examples
--------
>>> a = cudf.Series(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = cudf.Series(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = cudf.Series(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> cudf.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
"""
if normalize is not False:
raise NotImplementedError("normalize is not supported yet")
if values is None and aggfunc is not None:
raise ValueError("aggfunc cannot be used without values.")
if values is not None and aggfunc is None:
raise ValueError("values cannot be used without an aggfunc.")
if not isinstance(index, (list, tuple)):
index = [index]
if not isinstance(columns, (list, tuple)):
columns = [columns]
if not rownames:
rownames = _get_pivot_names(index, rownames, prefix="row")
if not colnames:
colnames = _get_pivot_names(columns, colnames, prefix="col")
if len(index) != len(rownames):
raise ValueError("index and rownames must have same length")
if len(columns) != len(colnames):
raise ValueError("columns and colnames must have same length")
if len(set(rownames)) != len(rownames):
raise ValueError("rownames must be unique")
if len(set(colnames)) != len(colnames):
raise ValueError("colnames must be unique")
data = {
**dict(zip(rownames, map(as_column, index))),
**dict(zip(colnames, map(as_column, columns))),
}
df = cudf.DataFrame._from_data(data)
if values is None:
df["__dummy__"] = 0
kwargs = {"aggfunc": "count", "fill_value": 0}
else:
df["__dummy__"] = values
kwargs = {"aggfunc": aggfunc}
table = pivot_table(
data=df,
index=rownames,
columns=colnames,
values="__dummy__",
margins=margins,
margins_name=margins_name,
dropna=dropna,
**kwargs,
)
return table
def pivot_table(
data,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=None,
margins_name="All",
observed=False,
sort=True,
):
"""
Create a spreadsheet-style pivot table as a DataFrame.
Parameters
----------
data : DataFrame
values : column name or list of column names to aggregate, optional
index : list of column names
Values to group by in the rows.
columns : list of column names
Values to group by in the columns.
aggfunc : str or dict, default "mean"
If dict is passed, the key is column to aggregate
and value is function name.
fill_value : scalar, default None
Value to replace missing values with
(in the resulting pivot table, after aggregation).
margins : Not supported
dropna : Not supported
margins_name : Not supported
observed : Not supported
sort : Not supported
Returns
-------
DataFrame
An Excel style pivot table.
"""
if margins is not False:
raise NotImplementedError("margins is not supported yet")
if margins_name != "All":
raise NotImplementedError("margins_name is not supported yet")
if dropna is not None:
raise NotImplementedError("dropna is not supported yet")
if observed is not False:
raise NotImplementedError("observed is not supported yet")
if sort is not True:
raise NotImplementedError("sort is not supported yet")
keys = index + columns
values_passed = values is not None
if values_passed:
if pd.api.types.is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, cudf.Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data._column_names):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError, KeyError):
pass
values = list(values)
grouped = data.groupby(keys)
agged = grouped.agg(aggfunc)
table = agged
if table.index.nlevels > 1 and index:
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[: len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if fill_value is not None:
table = table.fillna(fill_value)
# discard the top level
if values_passed and not values_multi and table._data.multiindex:
column_names = table._data.level_names[1:]
table_columns = tuple(
map(lambda column: column[1:], table._data.names)
)
table.columns = cudf.MultiIndex.from_tuples(
tuples=table_columns, names=column_names
)
if len(index) == 0 and len(columns) > 0:
table = table.T
return table
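# Illustrative sketch only (hypothetical helper, never called at import time):
# a minimal pivot_table call aggregating "value" over the unique
# ("row", "col") combinations with the default "mean" aggregation.
def _example_pivot_table():  # pragma: no cover
    df = cudf.DataFrame(
        {
            "row": ["r0", "r0", "r1", "r1"],
            "col": ["c0", "c1", "c0", "c1"],
            "value": [1.0, 2.0, 3.0, 4.0],
        }
    )
    return pivot_table(
        data=df, values="value", index=["row"], columns=["col"]
    )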
rapidsai_public_repos/cudf/python/cudf/cudf/core/multiindex.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
from __future__ import annotations
import itertools
import numbers
import operator
import pickle
import warnings
from collections import abc
from functools import cached_property
from numbers import Integral
from typing import Any, List, MutableMapping, Tuple, Union
import cupy as cp
import numpy as np
import pandas as pd
from pandas._config import get_option
import cudf
from cudf import _lib as libcudf
from cudf._typing import DataFrameOrSeries
from cudf.api.extensions import no_default
from cudf.api.types import is_integer, is_list_like, is_object_dtype
from cudf.core import column
from cudf.core._compat import PANDAS_GE_150
from cudf.core.frame import Frame
from cudf.core.index import BaseIndex, _lexsorted_equal_range, as_index
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.utils import NotIterable, _external_only_api, _is_same_name
def _maybe_indices_to_slice(indices: cp.ndarray) -> Union[slice, cp.ndarray]:
"""Makes best effort to convert an array of indices into a python slice.
If the conversion is not possible, return input. `indices` are expected
to be valid.
"""
# TODO: improve efficiency by avoiding sync.
if len(indices) == 1:
x = indices[0].item()
return slice(x, x + 1)
if len(indices) == 2:
x1, x2 = indices[0].item(), indices[1].item()
return slice(x1, x2 + 1, x2 - x1)
start, step = indices[0].item(), (indices[1] - indices[0]).item()
stop = start + step * len(indices)
if (indices == cp.arange(start, stop, step)).all():
return slice(start, stop, step)
return indices
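# Illustrative sketch only (hypothetical helper, never called at import time):
# evenly spaced indices collapse to a slice; anything else is returned as-is.
def _example_maybe_indices_to_slice():  # pragma: no cover
    assert _maybe_indices_to_slice(cp.asarray([2, 4, 6])) == slice(2, 8, 2)
    # Not expressible as a slice, so the original array comes back unchanged.
    return _maybe_indices_to_slice(cp.asarray([0, 1, 5]))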
class MultiIndex(Frame, BaseIndex, NotIterable):
"""A multi-level or hierarchical index.
Provides N-Dimensional indexing into Series and DataFrame objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes: sequence of arrays
Integers for each level designating which label at each location.
sortorder : optional int
Not yet supported
names: optional sequence of objects
Names for each of the index levels.
copy : bool, default False
Copy the levels and codes.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Not yet supported
Attributes
----------
names
nlevels
dtypes
levels
codes
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
get_level_values
get_loc
drop
Returns
-------
MultiIndex
Examples
--------
>>> import cudf
>>> cudf.MultiIndex(
... levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]])
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
)
"""
@_cudf_nvtx_annotate
def __init__(
self,
levels=None,
codes=None,
sortorder=None,
names=None,
dtype=None,
copy=False,
name=None,
**kwargs,
):
if sortorder is not None:
raise NotImplementedError("sortorder is not yet supported")
if name is not None:
raise NotImplementedError(
"Use `names`, `name` is not yet supported"
)
if len(levels) == 0:
raise ValueError("Must pass non-zero number of levels/codes")
if not isinstance(codes, cudf.DataFrame) and not isinstance(
codes[0], (abc.Sequence, np.ndarray, cp.ndarray)
):
raise TypeError("Codes is not a Sequence of sequences")
if copy:
if isinstance(codes, cudf.DataFrame):
codes = codes.copy(deep=True)
if len(levels) > 0 and isinstance(levels[0], cudf.Series):
levels = [level.copy(deep=True) for level in levels]
if not isinstance(codes, cudf.DataFrame):
if len(levels) == len(codes):
codes = cudf.DataFrame._from_data(
{
i: column.as_column(code).astype(np.int64)
for i, code in enumerate(codes)
}
)
else:
raise ValueError(
"MultiIndex has unequal number of levels and "
"codes and is inconsistent!"
)
levels = [cudf.Series(level) for level in levels]
if len(levels) != len(codes._data):
raise ValueError(
"MultiIndex has unequal number of levels and "
"codes and is inconsistent!"
)
if len({c.size for c in codes._data.columns}) != 1:
raise ValueError(
"MultiIndex length of codes does not match "
"and is inconsistent!"
)
        for i, (level, code) in enumerate(zip(levels, codes._data.columns)):
            if code.max() > len(level) - 1:
                raise ValueError(
                    f"MultiIndex code {i} contains value {code.max()} larger "
                    "than maximum level size at this position"
                )
source_data = {}
for i, (column_name, col) in enumerate(codes._data.items()):
if -1 in col:
level = cudf.DataFrame(
{column_name: [None] + list(levels[i])},
index=range(-1, len(levels[i])),
)
else:
level = cudf.DataFrame({column_name: levels[i]})
source_data[column_name] = libcudf.copying.gather(
[level._data[column_name]], col
)[0]
super().__init__(source_data)
self._levels = levels
self._codes = codes
self._name = None
self.names = names
@property # type: ignore
@_cudf_nvtx_annotate
def names(self):
return self._names
@names.setter # type: ignore
@_cudf_nvtx_annotate
def names(self, value):
if value is None:
value = [None] * self.nlevels
elif not is_list_like(value):
raise ValueError("Names should be list-like for a MultiIndex")
elif len(value) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if len(value) == len(set(value)):
# IMPORTANT: if the provided names are unique,
# we reconstruct self._data with the names as keys.
# If they are not unique, the keys of self._data
# and self._names will be different, which can lead
# to unexpected behavior in some cases. This is
# definitely buggy, but we can't disallow non-unique
# names either...
self._data = self._data.__class__._create_unsafe(
dict(zip(value, self._data.values())),
level_names=self._data.level_names,
)
self._names = pd.core.indexes.frozen.FrozenList(value)
@_cudf_nvtx_annotate
def to_series(self, index=None, name=None):
raise NotImplementedError(
"MultiIndex.to_series isn't implemented yet."
)
@_cudf_nvtx_annotate
def astype(self, dtype, copy: bool = True):
if not is_object_dtype(dtype):
raise TypeError(
"Setting a MultiIndex dtype to anything other than object is "
"not supported"
)
return self
@_cudf_nvtx_annotate
def rename(self, names, inplace=False):
"""
Alter MultiIndex level names
Parameters
----------
names : list of label
Names to set, length must be the same as number of levels
inplace : bool, default False
If True, modifies objects directly, otherwise returns a new
``MultiIndex`` instance
Returns
-------
None or MultiIndex
Examples
--------
        Renaming each level of a MultiIndex to a specified name:
>>> midx = cudf.MultiIndex.from_product(
... [('A', 'B'), (2020, 2021)], names=['c1', 'c2'])
>>> midx.rename(['lv1', 'lv2'])
MultiIndex([('A', 2020),
('A', 2021),
('B', 2020),
('B', 2021)],
names=['lv1', 'lv2'])
>>> midx.rename(['lv1', 'lv2'], inplace=True)
>>> midx
MultiIndex([('A', 2020),
('A', 2021),
('B', 2020),
('B', 2021)],
names=['lv1', 'lv2'])
        The ``names`` argument must be a list and must have the same length
        as ``MultiIndex.levels``:
>>> midx.rename(['lv0'])
Traceback (most recent call last):
ValueError: Length of names must match number of levels in MultiIndex.
"""
return self.set_names(names, level=None, inplace=inplace)
@_cudf_nvtx_annotate
def set_names(self, names, level=None, inplace=False):
names_is_list_like = is_list_like(names)
level_is_list_like = is_list_like(level)
if level is not None and not level_is_list_like and names_is_list_like:
raise TypeError(
"Names must be a string when a single level is provided."
)
if not names_is_list_like and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not names_is_list_like:
names = [names]
if level is not None and not level_is_list_like:
level = [level]
if level is not None and len(names) != len(level):
raise ValueError("Length of names must match length of level.")
if level is None and len(names) != self.nlevels:
raise ValueError(
"Length of names must match number of levels in MultiIndex."
)
if level is None:
level = range(self.nlevels)
else:
level = [self._level_index_from_level(lev) for lev in level]
existing_names = list(self.names)
for i, lev in enumerate(level):
existing_names[lev] = names[i]
names = existing_names
return self._set_names(names=names, inplace=inplace)
@classmethod
@_cudf_nvtx_annotate
def _from_data(
cls,
data: MutableMapping,
name: Any = None,
) -> MultiIndex:
obj = cls.from_frame(cudf.DataFrame._from_data(data=data))
if name is not None:
obj.name = name
return obj
@property # type: ignore
@_cudf_nvtx_annotate
def name(self):
return self._name
@name.setter # type: ignore
@_cudf_nvtx_annotate
def name(self, value):
self._name = value
@_cudf_nvtx_annotate
def copy(
self,
names=None,
dtype=None,
levels=None,
codes=None,
deep=False,
name=None,
):
"""Returns copy of MultiIndex object.
        Returns a copy of `MultiIndex`. The `levels` and `codes` values can be
set to the provided parameters. When they are provided, the returned
MultiIndex is always newly constructed.
Parameters
----------
names : sequence of objects, optional (default None)
Names for each of the index levels.
dtype : object, optional (default None)
MultiIndex dtype, only supports None or object type
.. deprecated:: 23.02
The `dtype` parameter is deprecated and will be removed in
a future version of cudf. Use the `astype` method instead.
levels : sequence of arrays, optional (default None)
The unique labels for each level. Original values used if None.
.. deprecated:: 23.02
The `levels` parameter is deprecated and will be removed in
a future version of cudf.
codes : sequence of arrays, optional (default None)
Integers for each level designating which label at each location.
Original values used if None.
.. deprecated:: 23.02
The `codes` parameter is deprecated and will be removed in
a future version of cudf.
deep : Bool (default False)
If True, `._data`, `._levels`, `._codes` will be copied. Ignored if
`levels` or `codes` are specified.
name : object, optional (default None)
To keep consistent with `Index.copy`, should not be used.
Returns
-------
Copy of MultiIndex Instance
Examples
--------
>>> df = cudf.DataFrame({'Close': [3400.00, 226.58, 3401.80, 228.91]})
>>> idx1 = cudf.MultiIndex(
... levels=[['2020-08-27', '2020-08-28'], ['AMZN', 'MSFT']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
... names=['Date', 'Symbol'])
>>> idx2 = idx1.copy(
... levels=[['day1', 'day2'], ['com1', 'com2']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
... names=['col1', 'col2'])
>>> df.index = idx1
>>> df
Close
Date Symbol
2020-08-27 AMZN 3400.00
MSFT 226.58
2020-08-28 AMZN 3401.80
MSFT 228.91
>>> df.index = idx2
>>> df
Close
col1 col2
day1 com1 3400.00
com2 226.58
day2 com1 3401.80
com2 228.91
"""
# TODO: Update message when set_levels is implemented.
# https://github.com/rapidsai/cudf/issues/12307
if levels is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"parameter levels is deprecated and will be removed in a "
"future version.",
FutureWarning,
)
# TODO: Update message when set_codes is implemented.
# https://github.com/rapidsai/cudf/issues/12308
if codes is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"parameter codes is deprecated and will be removed in a "
"future version.",
FutureWarning,
)
if dtype is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"parameter dtype is deprecated and will be removed in a "
"future version. Use the astype method instead.",
FutureWarning,
)
dtype = object if dtype is None else dtype
if not pd.api.types.is_object_dtype(dtype):
raise TypeError("Dtype for MultiIndex only supports object type.")
# ._data needs to be rebuilt
if levels is not None or codes is not None:
if self._levels is None or self._codes is None:
self._compute_levels_and_codes()
levels = self._levels if levels is None else levels
codes = self._codes if codes is None else codes
names = self.names if names is None else names
mi = MultiIndex(levels=levels, codes=codes, names=names, copy=deep)
return mi
mi = MultiIndex._from_data(self._data.copy(deep=deep))
if self._levels is not None:
mi._levels = [s.copy(deep) for s in self._levels]
if self._codes is not None:
mi._codes = self._codes.copy(deep)
if names is not None:
mi.names = names
elif self.names is not None:
mi.names = self.names.copy()
return mi
@_cudf_nvtx_annotate
def __repr__(self):
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
n = int(max_seq_items / 2) + 1
# TODO: Update the following two arange calls to
# a single arange call once arange has support for
# a vector start/end points.
indices = column.arange(start=0, stop=n, step=1)
indices = indices.append(
column.arange(start=len(self) - n, stop=len(self), step=1)
)
preprocess = self.take(indices)
else:
preprocess = self.copy(deep=False)
if any(col.has_nulls() for col in preprocess._data.columns):
preprocess_df = preprocess.to_frame(index=False)
for name, col in preprocess._data.items():
if isinstance(
col,
(
column.datetime.DatetimeColumn,
column.timedelta.TimeDeltaColumn,
),
):
preprocess_df[name] = col.astype("str").fillna(
str(cudf.NaT)
)
tuples_list = list(
zip(
*list(
map(lambda val: pd.NA if val is None else val, col)
for col in preprocess_df.to_arrow()
.to_pydict()
.values()
)
)
)
if not PANDAS_GE_150:
# Need this whole `if` block,
# this is a workaround for the following issue:
# https://github.com/pandas-dev/pandas/issues/39984
preprocess_pdf = pd.DataFrame(
{
name: col.to_pandas(nullable=(col.dtype.kind != "f"))
for name, col in preprocess._data.items()
}
)
preprocess_pdf.columns = preprocess.names
preprocess = pd.MultiIndex.from_frame(preprocess_pdf)
else:
preprocess = preprocess.to_pandas(nullable=True)
preprocess.values[:] = tuples_list
else:
preprocess = preprocess.to_pandas(nullable=True)
output = repr(preprocess)
output_prefix = self.__class__.__name__ + "("
        # str.lstrip treats its argument as a set of characters; remove the
        # exact class-name prefix instead.
        if output.startswith(output_prefix):
            output = output[len(output_prefix) :]
lines = output.split("\n")
if len(lines) > 1:
if "length=" in lines[-1] and len(self) != len(preprocess):
last_line = lines[-1]
length_index = last_line.index("length=")
last_line = last_line[:length_index] + f"length={len(self)})"
lines = lines[:-1]
lines.append(last_line)
data_output = "\n".join(lines)
return output_prefix + data_output
@property
def _codes_frame(self):
if self._codes is None:
self._compute_levels_and_codes()
return self._codes
@property # type: ignore
@_external_only_api("Use ._codes_frame instead")
@_cudf_nvtx_annotate
def codes(self):
"""
Returns the codes of the underlying MultiIndex.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a':[1, 2, 3], 'b':[10, 11, 12]})
>>> midx = cudf.MultiIndex.from_frame(df)
>>> midx
MultiIndex([(1, 10),
(2, 11),
(3, 12)],
names=['a', 'b'])
>>> midx.codes
FrozenList([[0, 1, 2], [0, 1, 2]])
"""
return pd.core.indexes.frozen.FrozenList(
col.values for col in self._codes_frame._columns
)
def get_slice_bound(self, label, side, kind=None):
raise NotImplementedError()
@property # type: ignore
@_cudf_nvtx_annotate
def nlevels(self):
"""Integer number of levels in this MultiIndex."""
return len(self._data)
@property # type: ignore
@_cudf_nvtx_annotate
def levels(self):
"""
Returns list of levels in the MultiIndex
Returns
-------
List of Series objects
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a':[1, 2, 3], 'b':[10, 11, 12]})
>>> cudf.MultiIndex.from_frame(df)
MultiIndex([(1, 10),
(2, 11),
(3, 12)],
names=['a', 'b'])
>>> midx = cudf.MultiIndex.from_frame(df)
>>> midx
MultiIndex([(1, 10),
(2, 11),
(3, 12)],
names=['a', 'b'])
>>> midx.levels
[Int64Index([1, 2, 3], dtype='int64', name='a'), Int64Index([10, 11, 12], dtype='int64', name='b')]
""" # noqa: E501
if self._levels is None:
self._compute_levels_and_codes()
return self._levels
@property # type: ignore
@_cudf_nvtx_annotate
def ndim(self):
"""Dimension of the data. For MultiIndex ndim is always 2."""
return 2
@_cudf_nvtx_annotate
def _get_level_label(self, level):
"""Get name of the level.
Parameters
----------
level : int or level name
if level is name, it will be returned as it is
else if level is index of the level, then level
label will be returned as per the index.
"""
if level in self._data.names:
return level
else:
return self._data.names[level]
@_cudf_nvtx_annotate
def isin(self, values, level=None):
"""Return a boolean array where the index values are in values.
Compute boolean array of whether each index value is found in
the passed set of values. The length of the returned boolean
array matches the length of the index.
Parameters
----------
values : set, list-like, Index or Multi-Index
Sought values.
level : str or int, optional
Name or position of the index level to use (if the index
is a MultiIndex).
Returns
-------
is_contained : cupy array
CuPy array of boolean values.
Notes
-----
        When `level` is None, `values` can only be a MultiIndex or a
        set/list-like of tuples.
        When `level` is provided, `values` can be an Index, a MultiIndex,
        or a set/list-like of tuples.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> midx = cudf.from_pandas(pd.MultiIndex.from_arrays([[1,2,3],
... ['red', 'blue', 'green']],
... names=('number', 'color')))
>>> midx
MultiIndex([(1, 'red'),
(2, 'blue'),
(3, 'green')],
names=['number', 'color'])
Check whether the strings in the 'color' level of the MultiIndex
are in a list of colors.
>>> midx.isin(['red', 'orange', 'yellow'], level='color')
array([ True, False, False])
To check across the levels of a MultiIndex, pass a list of tuples:
>>> midx.isin([(1, 'red'), (3, 'red')])
array([ True, False, False])
"""
if level is None:
if isinstance(values, cudf.MultiIndex):
values_idx = values
elif (
(
isinstance(
values,
(
cudf.Series,
cudf.Index,
cudf.DataFrame,
column.ColumnBase,
),
)
)
or (not is_list_like(values))
or (
is_list_like(values)
and len(values) > 0
and not isinstance(values[0], tuple)
)
):
                raise TypeError(
                    "values need to be a MultiIndex or a set/list-like of "
                    "tuples when `level=None`."
                )
else:
values_idx = cudf.MultiIndex.from_tuples(
values, names=self.names
)
self_df = self.to_frame(index=False).reset_index()
values_df = values_idx.to_frame(index=False)
idx = self_df.merge(values_df)._data["index"]
res = cudf.core.column.full(size=len(self), fill_value=False)
res[idx] = True
result = res.values
else:
level_series = self.get_level_values(level)
result = level_series.isin(values)
return result
def where(self, cond, other=None, inplace=False):
raise NotImplementedError(
".where is not supported for MultiIndex operations"
)
@_cudf_nvtx_annotate
def _compute_levels_and_codes(self):
levels = []
codes = {}
for name, col in self._data.items():
with warnings.catch_warnings():
# TODO: Remove this filter when
# `na_sentinel` is removed from `factorize`.
# This is a filter to not let the warnings from
# `factorize` show up in other parts of public APIs.
warnings.simplefilter("ignore")
code, cats = cudf.Series._from_data({None: col}).factorize()
cats.name = name
codes[name] = code.astype(np.int64)
levels.append(cats)
self._levels = levels
self._codes = cudf.DataFrame._from_data(codes)
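    # Illustrative note (not part of the API): for an index built from the
    # tuples [(1, 'a'), (2, 'a'), (1, 'b')], the factorization above would
    # roughly produce levels ~ [Series([1, 2]), Series(['a', 'b'])] and a
    # codes frame with one int64 column per level: [0, 1, 0] and [0, 0, 1].
    # Exact column labels and dtypes of the levels are an assumption here.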
@_cudf_nvtx_annotate
def _compute_validity_mask(self, index, row_tuple, max_length):
"""Computes the valid set of indices of values in the lookup"""
lookup = cudf.DataFrame()
for i, row in enumerate(row_tuple):
if isinstance(row, slice) and row == slice(None):
continue
lookup[i] = cudf.Series(row)
frame = cudf.DataFrame(dict(enumerate(index._data.columns)))
data_table = cudf.concat(
[
frame,
cudf.DataFrame(
{"idx": cudf.Series(column.arange(len(frame)))}
),
],
axis=1,
)
# Sort indices in pandas compatible mode
# because we want the indices to be fetched
# in a deterministic order.
# TODO: Remove this after merge/join
# obtain deterministic ordering.
if cudf.get_option("mode.pandas_compatible"):
lookup_order = "_" + "_".join(map(str, lookup._data.names))
lookup[lookup_order] = column.arange(len(lookup))
postprocess = operator.methodcaller(
"sort_values", by=[lookup_order, "idx"]
)
else:
postprocess = lambda r: r # noqa: E731
result = postprocess(lookup.merge(data_table))["idx"]
# Avoid computing levels unless the result of the merge is empty,
# which suggests that a KeyError should be raised.
if len(result) == 0:
for idx, row in enumerate(row_tuple):
if row == slice(None):
continue
if row not in index.levels[idx]._column:
raise KeyError(row)
return result
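    # Illustrative note (hedged): for row_tuple=('b',) against an index whose
    # first level holds ['a', 'b', 'b'], the merge above yields the positions
    # [1, 2]; in pandas-compatible mode they are additionally sorted so the
    # ordering is deterministic.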
@_cudf_nvtx_annotate
def _get_valid_indices_by_tuple(self, index, row_tuple, max_length):
# Instructions for Slicing
# if tuple, get first and last elements of tuple
# if open beginning tuple, get 0 to highest valid_index
# if open ending tuple, get highest valid_index to len()
# if not open end or beginning, get range lowest beginning index
# to highest ending index
if isinstance(row_tuple, slice):
if (
isinstance(row_tuple.start, numbers.Number)
or isinstance(row_tuple.stop, numbers.Number)
or row_tuple == slice(None)
):
stop = row_tuple.stop or max_length
start, stop, step = row_tuple.indices(stop)
return column.arange(start, stop, step)
start_values = self._compute_validity_mask(
index, row_tuple.start, max_length
)
stop_values = self._compute_validity_mask(
index, row_tuple.stop, max_length
)
return column.arange(start_values.min(), stop_values.max() + 1)
elif isinstance(row_tuple, numbers.Number):
return row_tuple
return self._compute_validity_mask(index, row_tuple, max_length)
@_cudf_nvtx_annotate
def _index_and_downcast(self, result, index, index_key):
if isinstance(index_key, (numbers.Number, slice)):
index_key = [index_key]
if (
len(index_key) > 0 and not isinstance(index_key, tuple)
) or isinstance(index_key[0], slice):
index_key = index_key[0]
slice_access = isinstance(index_key, slice)
out_index = cudf.DataFrame()
# Select the last n-k columns where n is the number of columns and k is
# the length of the indexing tuple
size = 0
if not isinstance(index_key, (numbers.Number, slice)):
size = len(index_key)
for k in range(size, len(index._data)):
out_index.insert(
out_index._num_columns,
k,
cudf.Series._from_data({None: index._data.columns[k]}),
)
# determine if we should downcast from a DataFrame to a Series
need_downcast = (
isinstance(result, cudf.DataFrame)
and len(result) == 1 # only downcast if we have a single row
and not slice_access # never downcast if we sliced
and (
size == 0 # index_key was an integer
# we indexed into a single row directly, using its label:
or len(index_key) == self.nlevels
)
)
if need_downcast:
result = result.T
return result[result._data.names[0]]
if len(result) == 0 and not slice_access:
# Pandas returns an empty Series with a tuple as name
# the one expected result column
result = cudf.Series._from_data(
{}, name=tuple(col[0] for col in index._data.columns)
)
elif out_index._num_columns == 1:
# If there's only one column remaining in the output index, convert
# it into an Index and name the final index values according
# to that column's name.
*_, last_column = index._data.columns
out_index = as_index(last_column)
out_index.name = index.names[-1]
index = out_index
elif out_index._num_columns > 1:
# Otherwise pop the leftmost levels, names, and codes from the
# source index until it has the correct number of columns (n-k)
result.reset_index(drop=True)
if index.names is not None:
result.names = index.names[size:]
index = MultiIndex(
levels=index.levels[size:],
codes=index._codes_frame.iloc[:, size:],
names=index.names[size:],
)
if isinstance(index_key, tuple):
result.index = index
return result
@_cudf_nvtx_annotate
def _get_row_major(
self,
df: DataFrameOrSeries,
row_tuple: Union[
numbers.Number, slice, Tuple[Any, ...], List[Tuple[Any, ...]]
],
) -> DataFrameOrSeries:
if pd.api.types.is_bool_dtype(
list(row_tuple) if isinstance(row_tuple, tuple) else row_tuple
):
return df[row_tuple]
if isinstance(row_tuple, slice):
if row_tuple.start is None:
row_tuple = slice(self[0], row_tuple.stop, row_tuple.step)
if row_tuple.stop is None:
row_tuple = slice(row_tuple.start, self[-1], row_tuple.step)
self._validate_indexer(row_tuple)
valid_indices = self._get_valid_indices_by_tuple(
df.index, row_tuple, len(df.index)
)
indices = cudf.Series(valid_indices)
result = df.take(indices)
final = self._index_and_downcast(result, result.index, row_tuple)
return final
@_cudf_nvtx_annotate
def _validate_indexer(
self,
indexer: Union[
numbers.Number, slice, Tuple[Any, ...], List[Tuple[Any, ...]]
],
):
if isinstance(indexer, numbers.Number):
return
if isinstance(indexer, tuple):
# drop any slice(None) from the end:
indexer = tuple(
itertools.dropwhile(
lambda x: x == slice(None), reversed(indexer)
)
)[::-1]
# now check for size
if len(indexer) > self.nlevels:
raise IndexError("Indexer size exceeds number of levels")
elif isinstance(indexer, slice):
self._validate_indexer(indexer.start)
self._validate_indexer(indexer.stop)
else:
for i in indexer:
self._validate_indexer(i)
@_cudf_nvtx_annotate
def __eq__(self, other):
if isinstance(other, MultiIndex):
return np.array(
[
self_col.equals(other_col)
for self_col, other_col in zip(
self._data.values(), other._data.values()
)
]
)
return NotImplemented
@property # type: ignore
@_cudf_nvtx_annotate
def size(self):
# The size of a MultiIndex is only dependent on the number of rows.
return self._num_rows
@_cudf_nvtx_annotate
def take(self, indices):
if isinstance(indices, cudf.Series) and indices.has_nulls:
raise ValueError("Column must have no nulls.")
obj = super().take(indices)
obj.names = self.names
return obj
@_cudf_nvtx_annotate
def serialize(self):
header, frames = super().serialize()
# Overwrite the names in _data with the true names.
header["column_names"] = pickle.dumps(self.names)
return header, frames
@classmethod
@_cudf_nvtx_annotate
def deserialize(cls, header, frames):
# Spoof the column names to construct the frame, then set manually.
column_names = pickle.loads(header["column_names"])
header["column_names"] = pickle.dumps(range(0, len(column_names)))
obj = super().deserialize(header, frames)
return obj._set_names(column_names)
@_cudf_nvtx_annotate
def __getitem__(self, index):
flatten = isinstance(index, int)
if isinstance(index, (Integral, abc.Sequence)):
index = np.array(index)
elif isinstance(index, slice):
start, stop, step = index.indices(len(self))
index = column.arange(start, stop, step)
result = MultiIndex.from_frame(
self.to_frame(index=False, name=range(0, self.nlevels)).take(
index
),
names=self.names,
)
# we are indexing into a single row of the MultiIndex,
# return that row as a tuple:
if flatten:
return result.to_pandas()[0]
if self._codes_frame is not None:
result._codes = self._codes_frame.take(index)
if self._levels is not None:
result._levels = self._levels
return result
@_cudf_nvtx_annotate
def to_frame(self, index=True, name=no_default, allow_duplicates=False):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
Parameters
----------
index : bool, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of str, optional
The passed names should substitute index level names.
        allow_duplicates : bool, optional, default False
            Allow duplicate column labels to be created. Note
            that this parameter is non-functional because
            duplicate column labels aren't supported in cudf.
Returns
-------
DataFrame
Examples
--------
>>> import cudf
>>> mi = cudf.MultiIndex.from_tuples([('a', 'c'), ('b', 'd')])
>>> mi
MultiIndex([('a', 'c'),
('b', 'd')],
)
>>> df = mi.to_frame()
>>> df
0 1
a c a c
b d b d
>>> df = mi.to_frame(index=False)
>>> df
0 1
0 a c
1 b d
>>> df = mi.to_frame(name=['x', 'y'])
>>> df
x y
a c a c
b d b d
"""
# TODO: Currently this function makes a shallow copy, which is
# incorrect. We want to make a deep copy, otherwise further
# modifications of the resulting DataFrame will affect the MultiIndex.
if name is None:
warnings.warn(
"Explicitly passing `name=None` currently preserves the "
"Index's name or uses a default name of 0. This behaviour "
"is deprecated, and in the future `None` will be used "
"as the name of the resulting DataFrame column.",
FutureWarning,
)
name = no_default
if name is not no_default:
if len(name) != len(self.levels):
raise ValueError(
"'name' should have the same length as "
"number of levels on index."
)
column_names = name
else:
column_names = self.names
all_none_names = None
if not (
all_none_names := all(x is None for x in column_names)
) and len(column_names) != len(set(column_names)):
raise ValueError("Duplicate column names are not allowed")
df = cudf.DataFrame._from_data(
data=self._data,
columns=column_names
if name is not no_default and not all_none_names
else None,
)
if index:
df = df.set_index(self)
return df
@_cudf_nvtx_annotate
def get_level_values(self, level):
"""
Return the values at the requested level
Parameters
----------
level : int or label
Returns
-------
An Index containing the values at the requested level.
"""
colnames = self._data.names
if level not in colnames:
if isinstance(level, int):
if level < 0:
level = level + len(colnames)
if level < 0 or level >= len(colnames):
raise IndexError(f"Invalid level number: '{level}'")
level_idx = level
level = colnames[level_idx]
elif level in self.names:
level_idx = list(self.names).index(level)
level = colnames[level_idx]
else:
raise KeyError(f"Level not found: '{level}'")
else:
level_idx = colnames.index(level)
level_values = as_index(self._data[level], name=self.names[level_idx])
return level_values
def _is_numeric(self):
return False
def _is_boolean(self):
return False
def _is_integer(self):
return False
def _is_floating(self):
return False
def _is_object(self):
return False
def _is_categorical(self):
return False
def _is_interval(self):
return False
@classmethod
@_cudf_nvtx_annotate
def _concat(cls, objs):
source_data = [o.to_frame(index=False) for o in objs]
# TODO: Verify if this is really necessary or if we can rely on
# DataFrame._concat.
if len(source_data) > 1:
colnames = source_data[0]._data.to_pandas_index()
for obj in source_data[1:]:
obj.columns = colnames
source_data = cudf.DataFrame._concat(source_data)
names = [None] * source_data._num_columns
objs = list(filter(lambda o: o.names is not None, objs))
for o in range(len(objs)):
for i, name in enumerate(objs[o].names):
names[i] = names[i] or name
return cudf.MultiIndex.from_frame(source_data, names=names)
@classmethod
@_cudf_nvtx_annotate
def from_tuples(cls, tuples, names=None):
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, 'red'), (1, 'blue'),
... (2, 'red'), (2, 'blue')]
>>> cudf.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
names=['number', 'color'])
"""
# Use Pandas for handling Python host objects
pdi = pd.MultiIndex.from_tuples(tuples, names=names)
return cls.from_pandas(pdi)
@_cudf_nvtx_annotate
def to_numpy(self):
return self.values_host
@property # type: ignore
@_cudf_nvtx_annotate
def values_host(self):
"""
Return a numpy representation of the MultiIndex.
Only the values in the MultiIndex will be returned.
Returns
-------
out : numpy.ndarray
The values of the MultiIndex.
Examples
--------
>>> import cudf
>>> midx = cudf.MultiIndex(
... levels=[[1, 3, 4, 5], [1, 2, 5]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> midx.values_host
array([(1, 1), (1, 5), (3, 2), (4, 2), (5, 1)], dtype=object)
>>> type(midx.values_host)
<class 'numpy.ndarray'>
"""
return self.to_pandas().values
@property # type: ignore
@_cudf_nvtx_annotate
def values(self):
"""
Return a CuPy representation of the MultiIndex.
Only the values in the MultiIndex will be returned.
Returns
-------
out: cupy.ndarray
The values of the MultiIndex.
Examples
--------
>>> import cudf
>>> midx = cudf.MultiIndex(
... levels=[[1, 3, 4, 5], [1, 2, 5]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> midx.values
array([[1, 1],
[1, 5],
[3, 2],
[4, 2],
[5, 1]])
>>> type(midx.values)
<class 'cupy...ndarray'>
"""
if cudf.get_option("mode.pandas_compatible"):
raise NotImplementedError(
"Unable to create a cupy array with tuples."
)
return self.to_frame(index=False).values
@classmethod
@_cudf_nvtx_annotate
def from_frame(cls, df, names=None):
"""
Make a MultiIndex from a DataFrame.
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> cudf.MultiIndex.from_frame(df)
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> cudf.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex([('HI', 'Temp'),
('HI', 'Precip'),
('NJ', 'Temp'),
('NJ', 'Precip')],
names=['state', 'observation'])
"""
obj = cls.__new__(cls)
super(cls, obj).__init__()
source_data = df.copy(deep=False)
source_data.reset_index(drop=True, inplace=True)
if isinstance(source_data, pd.DataFrame):
source_data = cudf.DataFrame.from_pandas(source_data)
names = names if names is not None else source_data._data.names
# if names are unique
# try using those as the source_data column names:
if len(dict.fromkeys(names)) == len(names):
source_data.columns = names
obj._name = None
obj._data = source_data._data
obj.names = names
obj._codes = None
obj._levels = None
return obj
@classmethod
@_cudf_nvtx_annotate
def from_product(cls, arrays, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
names : list / sequence of str, optional
Names for the levels in the index.
If not explicitly provided, names will be inferred from the
elements of iterables if an element has a name attribute
Returns
-------
MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> cudf.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex([(0, 'green'),
(0, 'purple'),
(1, 'green'),
(1, 'purple'),
(2, 'green'),
(2, 'purple')],
names=['number', 'color'])
"""
# Use Pandas for handling Python host objects
pdi = pd.MultiIndex.from_product(arrays, names=names)
return cls.from_pandas(pdi)
@_cudf_nvtx_annotate
def _poplevels(self, level):
"""
Remove and return the specified levels from self.
Parameters
----------
level : level name or index, list
One or more levels to remove
Returns
-------
Index composed of the removed levels. If only a single level
is removed, a flat index is returned. If no levels are specified
(empty list), None is returned.
"""
if not pd.api.types.is_list_like(level):
level = (level,)
ilevels = sorted(self._level_index_from_level(lev) for lev in level)
if not ilevels:
return None
popped_data = {}
popped_names = []
names = list(self.names)
# build the popped data and names
for i in ilevels:
n = self._data.names[i]
popped_data[n] = self._data[n]
popped_names.append(self.names[i])
# pop the levels out from self
# this must be done iterating backwards
for i in reversed(ilevels):
n = self._data.names[i]
names.pop(i)
popped_data[n] = self._data.pop(n)
# construct the popped result
popped = cudf.core.index._index_from_data(popped_data)
popped.names = popped_names
# update self
self.names = names
self._compute_levels_and_codes()
return popped
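    # Illustrative note (hedged): for a two-level index named ['a', 'b'],
    # midx._poplevels('a') would return a flat Index of the 'a' values and
    # leave `midx` with only the 'b' level; droplevel() below builds on
    # this behaviour.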
@_cudf_nvtx_annotate
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int or str, default -2
First level of index to be swapped.
j : int or str, default -1
Second level of index to be swapped.
Returns
-------
MultiIndex
A new MultiIndex.
Examples
--------
>>> import cudf
>>> mi = cudf.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex([('a', 'bb'),
('a', 'aa'),
('b', 'bb'),
('b', 'aa')],
)
>>> mi.swaplevel(0, 1)
MultiIndex([('bb', 'a'),
('aa', 'a'),
('bb', 'b'),
('aa', 'b')],
)
"""
name_i = self._data.names[i] if isinstance(i, int) else i
name_j = self._data.names[j] if isinstance(j, int) else j
new_data = {}
for k, v in self._data.items():
if k not in (name_i, name_j):
new_data[k] = v
elif k == name_i:
new_data[name_j] = self._data[name_j]
elif k == name_j:
new_data[name_i] = self._data[name_i]
midx = MultiIndex._from_data(new_data)
if all(n is None for n in self.names):
midx = midx.set_names(self.names)
return midx
@_cudf_nvtx_annotate
def droplevel(self, level=-1):
"""
Removes the specified levels from the MultiIndex.
Parameters
----------
level : level name or index, list-like
Integer, name or list of such, specifying one or more
levels to drop from the MultiIndex
Returns
-------
A MultiIndex or Index object, depending on the number of remaining
levels.
Examples
--------
>>> import cudf
>>> idx = cudf.MultiIndex.from_frame(
... cudf.DataFrame(
... {
... "first": ["a", "a", "a", "b", "b", "b"],
... "second": [1, 1, 2, 2, 3, 3],
... "third": [0, 1, 2, 0, 1, 2],
... }
... )
... )
Dropping level by index:
>>> idx.droplevel(0)
MultiIndex([(1, 0),
(1, 1),
(2, 2),
(2, 0),
(3, 1),
(3, 2)],
names=['second', 'third'])
Dropping level by name:
>>> idx.droplevel("first")
MultiIndex([(1, 0),
(1, 1),
(2, 2),
(2, 0),
(3, 1),
(3, 2)],
names=['second', 'third'])
Dropping multiple levels:
>>> idx.droplevel(["first", "second"])
Int64Index([0, 1, 2, 0, 1, 2], dtype='int64', name='third')
"""
mi = self.copy(deep=False)
mi._poplevels(level)
if mi.nlevels == 1:
return mi.get_level_values(mi.names[0])
else:
return mi
@_cudf_nvtx_annotate
def to_pandas(self, nullable=False, **kwargs):
result = self.to_frame(
index=False, name=list(range(self.nlevels))
).to_pandas(nullable=nullable)
return pd.MultiIndex.from_frame(result, names=self.names)
@classmethod
@_cudf_nvtx_annotate
def from_pandas(cls, multiindex, nan_as_null=no_default):
"""
Convert from a Pandas MultiIndex
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> pmi = pd.MultiIndex(levels=[['a', 'b'], ['c', 'd']],
... codes=[[0, 1], [1, 1]])
>>> cudf.from_pandas(pmi)
MultiIndex([('a', 'd'),
('b', 'd')],
)
"""
if not isinstance(multiindex, pd.MultiIndex):
raise TypeError("not a pandas.MultiIndex")
if nan_as_null is no_default:
nan_as_null = (
False if cudf.get_option("mode.pandas_compatible") else None
)
# if `multiindex` has two or more levels that
# have the same name, then `multiindex.to_frame()`
# results in a DataFrame containing only one of those
# levels. Thus, set `names` to some tuple of unique values
# and then call `multiindex.to_frame(name=names)`,
# which preserves all levels of `multiindex`.
names = tuple(range(len(multiindex.names)))
df = cudf.DataFrame.from_pandas(
multiindex.to_frame(index=False, name=names), nan_as_null
)
return cls.from_frame(df, names=multiindex.names)
@cached_property # type: ignore
@_cudf_nvtx_annotate
def is_unique(self):
return len(self) == len(self.unique())
@property
def dtype(self):
return np.dtype("O")
@cached_property # type: ignore
@_cudf_nvtx_annotate
def is_monotonic_increasing(self):
"""
Return if the index is monotonic increasing
(only equal or increasing) values.
"""
return self._is_sorted(ascending=None, null_position=None)
@cached_property # type: ignore
@_cudf_nvtx_annotate
def is_monotonic_decreasing(self):
"""
Return if the index is monotonic decreasing
(only equal or decreasing) values.
"""
return self._is_sorted(
ascending=[False] * len(self.levels), null_position=None
)
@_cudf_nvtx_annotate
def fillna(self, value):
"""
Fill null values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill nulls. This value cannot be a
            list-like.
Returns
-------
filled : MultiIndex
Examples
--------
>>> import cudf
>>> index = cudf.MultiIndex(
... levels=[["a", "b", "c", None], ["1", None, "5"]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> index
MultiIndex([( 'a', '1'),
( 'a', '5'),
( 'b', <NA>),
( 'c', <NA>),
(<NA>, '1')],
names=['x', 'y'])
>>> index.fillna('hello')
MultiIndex([( 'a', '1'),
( 'a', '5'),
( 'b', 'hello'),
( 'c', 'hello'),
('hello', '1')],
names=['x', 'y'])
"""
return super().fillna(value=value)
@_cudf_nvtx_annotate
def unique(self):
return self.drop_duplicates(keep="first")
def _clean_nulls_from_index(self):
"""
        Convert all NA values (if any) in a MultiIndex object
        to `<NA>` as a preprocessing step for `__repr__` methods.
"""
index_df = self.to_frame(index=False, name=list(range(self.nlevels)))
return MultiIndex.from_frame(
index_df._clean_nulls_from_dataframe(index_df), names=self.names
)
@_cudf_nvtx_annotate
def memory_usage(self, deep=False):
usage = sum(col.memory_usage for col in self._data.columns)
if self.levels:
for level in self.levels:
usage += level.memory_usage(deep=deep)
if self._codes_frame:
for col in self._codes_frame._data.columns:
usage += col.memory_usage
return usage
@_cudf_nvtx_annotate
def difference(self, other, sort=None):
if hasattr(other, "to_pandas"):
other = other.to_pandas()
return cudf.from_pandas(self.to_pandas().difference(other, sort))
@_cudf_nvtx_annotate
def append(self, other):
"""
Append a collection of MultiIndex objects together
Parameters
----------
other : MultiIndex or list/tuple of MultiIndex objects
Returns
-------
appended : Index
Examples
--------
>>> import cudf
>>> idx1 = cudf.MultiIndex(
... levels=[[1, 2], ['blue', 'red']],
... codes=[[0, 0, 1, 1], [1, 0, 1, 0]]
... )
>>> idx2 = cudf.MultiIndex(
... levels=[[3, 4], ['blue', 'red']],
... codes=[[0, 0, 1, 1], [1, 0, 1, 0]]
... )
>>> idx1
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue')],
)
>>> idx2
MultiIndex([(3, 'red'),
(3, 'blue'),
(4, 'red'),
(4, 'blue')],
)
>>> idx1.append(idx2)
MultiIndex([(1, 'red'),
(1, 'blue'),
(2, 'red'),
(2, 'blue'),
(3, 'red'),
(3, 'blue'),
(4, 'red'),
(4, 'blue')],
)
"""
if isinstance(other, (list, tuple)):
to_concat = [self]
to_concat.extend(other)
else:
to_concat = [self, other]
for obj in to_concat:
if not isinstance(obj, MultiIndex):
raise TypeError(
f"all objects should be of type "
f"MultiIndex for MultiIndex.append, "
f"found object of type: {type(obj)}"
)
return MultiIndex._concat(to_concat)
@_cudf_nvtx_annotate
def __array_function__(self, func, types, args, kwargs):
cudf_df_module = MultiIndex
for submodule in func.__module__.split(".")[1:]:
# point cudf to the correct submodule
if hasattr(cudf_df_module, submodule):
cudf_df_module = getattr(cudf_df_module, submodule)
else:
return NotImplemented
fname = func.__name__
handled_types = [cudf_df_module, np.ndarray]
for t in types:
if t not in handled_types:
return NotImplemented
if hasattr(cudf_df_module, fname):
cudf_func = getattr(cudf_df_module, fname)
# Handle case if cudf_func is same as numpy function
if cudf_func is func:
return NotImplemented
else:
return cudf_func(*args, **kwargs)
else:
return NotImplemented
def _level_index_from_level(self, level):
"""
Return level index from given level name or index
"""
try:
return self.names.index(level)
except ValueError:
if not is_integer(level):
raise KeyError(f"Level {level} not found")
if level < 0:
level += self.nlevels
if level >= self.nlevels:
raise IndexError(
f"Level {level} out of bounds. "
f"Index has {self.nlevels} levels."
) from None
return level
@_cudf_nvtx_annotate
def get_loc(self, key, method=None, tolerance=None):
"""
Get location for a label or a tuple of labels.
The location is returned as an integer/slice or boolean mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
- If index is unique, search result is unique, return a single int.
- If index is monotonic, the location is returned as a slice object.
- Otherwise, cudf attempts a best effort to convert the search
result into a slice object, and will return a boolean mask if
it fails to do so. Notice this can deviate from Pandas behavior
in some situations.
Examples
--------
>>> import cudf
>>> mi = cudf.MultiIndex.from_tuples(
... [('a', 'd'), ('b', 'e'), ('b', 'f')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
>>> non_monotonic_non_unique_idx = cudf.MultiIndex.from_tuples(
... [('c', 'd'), ('b', 'e'), ('a', 'f'), ('b', 'e')])
>>> non_monotonic_non_unique_idx.get_loc('b')  # differs from pandas
slice(1, 4, 2)
.. pandas-compat::
**MultiIndex.get_loc**
The return types of this function may deviate from the
method provided by Pandas. If the index is neither
lexicographically sorted nor unique, a best effort attempt is made
to coerce the found indices into a slice. For example:
.. code-block::
>>> import pandas as pd
>>> import cudf
>>> x = pd.MultiIndex.from_tuples([
... (2, 1, 1), (1, 2, 3), (1, 2, 1),
... (1, 1, 1), (1, 1, 1), (2, 2, 1),
... ])
>>> x.get_loc(1)
array([False, True, True, True, True, False])
>>> cudf.from_pandas(x).get_loc(1)
slice(1, 5, 1)
"""
if tolerance is not None:
raise NotImplementedError(
"Parameter tolerance is not supported yet."
)
if method is not None:
raise NotImplementedError(
"only the default get_loc method is currently supported for"
" MultiIndex"
)
is_sorted = (
self.is_monotonic_increasing or self.is_monotonic_decreasing
)
is_unique = self.is_unique
key = (key,) if not isinstance(key, tuple) else key
# Handle partial key search. If the length of `key` is less than
# `nlevels`, only the first `len(key)` levels are searched.
key_as_table = cudf.core.frame.Frame(
{i: column.as_column(k, length=1) for i, k in enumerate(key)}
)
partial_index = self.__class__._from_data(
data=self._data.select_by_index(slice(key_as_table._num_columns))
)
(
lower_bound,
upper_bound,
sort_inds,
) = _lexsorted_equal_range(partial_index, key_as_table, is_sorted)
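# [lower_bound, upper_bound) is the half-open range of positions (in
# sorted order) whose first len(key) levels equal the key; an empty
# range means the key is absent.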
if lower_bound == upper_bound:
raise KeyError(key)
if is_unique and lower_bound + 1 == upper_bound:
# Indices are unique (Pandas constraint), search result is unique,
# return int.
return (
lower_bound
if is_sorted
else sort_inds.element_indexing(lower_bound)
)
if is_sorted:
# In monotonic index, lex search result is continuous. A slice for
# the range is returned.
return slice(lower_bound, upper_bound)
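# For an unsorted index the matching positions are scattered. Try to
# compress them into a slice; if they are not evenly spaced fall back
# to a boolean mask.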
true_inds = sort_inds.slice(lower_bound, upper_bound).values
true_inds = _maybe_indices_to_slice(true_inds)
if isinstance(true_inds, slice):
return true_inds
# Not sorted and not unique. Return a boolean mask
mask = cp.full(self._data.nrows, False)
mask[true_inds] = True
return mask
def _get_reconciled_name_object(self, other) -> MultiIndex:
"""
If the result of a set operation will be self,
return self, unless the names change, in which
case make a shallow copy of self.
"""
names = self._maybe_match_names(other)
if self.names != names:
return self.rename(names)
return self
def _maybe_match_names(self, other):
"""
Try to find common names to attach to the result of an operation
between a and b. Return a consensus list of names if they match
at least partly or list of None if they have completely
different names.
"""
if len(self.names) != len(other.names):
return [None] * len(self.names)
return [
self_name if _is_same_name(self_name, other_name) else None
for self_name, other_name in zip(self.names, other.names)
]
@_cudf_nvtx_annotate
def union(self, other, sort=None):
if not isinstance(other, MultiIndex):
msg = "other must be a MultiIndex or a list of tuples"
try:
other = MultiIndex.from_tuples(other, names=self.names)
except (ValueError, TypeError) as err:
# ValueError raised by tuples_to_object_array if we
# have non-object dtype
raise TypeError(msg) from err
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
if not len(other) or self.equals(other):
return self._get_reconciled_name_object(other)
elif not len(self):
return other._get_reconciled_name_object(self)
return self._union(other, sort=sort)
@_cudf_nvtx_annotate
def _union(self, other, sort=None):
# TODO: When to_frame is refactored to return a
# deep copy in future, we should push most of the common
# logic between MultiIndex._union & BaseIndex._union into
# GenericIndex._union.
other_df = other.copy(deep=True).to_frame(index=False)
self_df = self.copy(deep=True).to_frame(index=False)
col_names = list(range(0, self.nlevels))
self_df.columns = col_names
other_df.columns = col_names
self_df["order"] = self_df.index
other_df["order"] = other_df.index
result_df = self_df.merge(other_df, on=col_names, how="outer")
result_df = result_df.sort_values(
by=result_df._data.to_pandas_index()[self.nlevels :],
ignore_index=True,
)
midx = MultiIndex.from_frame(result_df.iloc[:, : self.nlevels])
midx.names = self.names if self.names == other.names else None
if sort is None and len(other):
return midx.sort_values()
return midx
@_cudf_nvtx_annotate
def _intersection(self, other, sort=None):
if self.names != other.names:
deep = True
col_names = list(range(0, self.nlevels))
res_name = (None,) * self.nlevels
else:
deep = False
col_names = None
res_name = self.names
other_df = other.copy(deep=deep).to_frame(index=False)
self_df = self.copy(deep=deep).to_frame(index=False)
if col_names is not None:
other_df.columns = col_names
self_df.columns = col_names
result_df = cudf.merge(self_df, other_df, how="inner")
midx = self.__class__.from_frame(result_df, names=res_name)
if sort is None and len(other):
return midx.sort_values()
return midx
@_cudf_nvtx_annotate
def _copy_type_metadata(
self: MultiIndex, other: MultiIndex, *, override_dtypes=None
) -> MultiIndex:
res = super()._copy_type_metadata(other)
res._names = other._names
return res
@_cudf_nvtx_annotate
def _split_columns_by_levels(self, levels):
# This function assumes that for levels with duplicate names, they are
# specified by indices, not name by ``levels``. E.g. [None, None] can
# only be specified by 0, 1, not "None".
if levels is None:
return (
list(self._data.columns),
[],
[
f"level_{i}" if name is None else name
for i, name in enumerate(self.names)
],
[],
)
# Normalize named levels into indices
level_names = list(self.names)
level_indices = {
lv if isinstance(lv, int) else level_names.index(lv)
for lv in levels
}
# Split the columns
data_columns, index_columns = [], []
data_names, index_names = [], []
for i, (name, col) in enumerate(zip(self.names, self._data.columns)):
if i in level_indices:
name = f"level_{i}" if name is None else name
data_columns.append(col)
data_names.append(name)
else:
index_columns.append(col)
index_names.append(name)
return data_columns, index_columns, data_names, index_names
def repeat(self, repeats, axis=None):
return self._from_columns_like_self(
Frame._repeat([*self._columns], repeats, axis), self._column_names
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/index.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
from __future__ import annotations
import math
import pickle
import warnings
from functools import cache, cached_property
from numbers import Number
from typing import (
Any,
Dict,
List,
MutableMapping,
Optional,
Sequence,
Tuple,
Type,
Union,
)
import cupy
import numpy as np
import pandas as pd
from pandas._config import get_option
import cudf
from cudf._lib.datetime import extract_quarter, is_leap_year
from cudf._lib.filling import sequence
from cudf._lib.search import search_sorted
from cudf._lib.types import size_type_dtype
from cudf.api.extensions import no_default
from cudf.api.types import (
_is_non_decimal_numeric_dtype,
is_categorical_dtype,
is_dtype_equal,
is_integer,
is_interval_dtype,
is_list_like,
is_scalar,
is_signed_integer_dtype,
is_string_dtype,
)
from cudf.core._base_index import BaseIndex
from cudf.core.column import (
CategoricalColumn,
ColumnBase,
DatetimeColumn,
IntervalColumn,
NumericalColumn,
StringColumn,
StructColumn,
TimeDeltaColumn,
column,
)
from cudf.core.column.column import as_column, concat_columns
from cudf.core.column.string import StringMethods as StringMethods
from cudf.core.dtypes import IntervalDtype
from cudf.core.frame import Frame
from cudf.core.mixins import BinaryOperand
from cudf.core.single_column_frame import SingleColumnFrame
from cudf.utils.docutils import copy_docstring
from cudf.utils.dtypes import (
_maybe_convert_to_default_type,
find_common_type,
is_mixed_with_object_dtype,
numeric_normalize_types,
)
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.utils import _warn_no_dask_cudf, search_range
def _lexsorted_equal_range(
idx: Union[GenericIndex, cudf.MultiIndex],
key_as_table: Frame,
is_sorted: bool,
) -> Tuple[int, int, Optional[ColumnBase]]:
"""Get equal range for key in lexicographically sorted index. If index
is not sorted when called, a sort will take place and `sort_inds` is
returned. Otherwise `None` is returned in that position.
"""
if not is_sorted:
sort_inds = idx._get_sorted_inds()
sort_vals = idx._gather(sort_inds)
else:
sort_inds = None
sort_vals = idx
lower_bound = search_sorted(
[*sort_vals._data.columns], [*key_as_table._columns], side="left"
).element_indexing(0)
upper_bound = search_sorted(
[*sort_vals._data.columns], [*key_as_table._columns], side="right"
).element_indexing(0)
return lower_bound, upper_bound, sort_inds
def _index_from_data(data: MutableMapping, name: Any = no_default):
"""Construct an index of the appropriate type from some data."""
if len(data) == 0:
raise ValueError("Cannot construct Index from any empty Table")
if len(data) == 1:
values = next(iter(data.values()))
if isinstance(values, NumericalColumn):
try:
index_class_type: Type[
Union[GenericIndex, cudf.MultiIndex]
] = _dtype_to_index[values.dtype.type]
except KeyError:
index_class_type = GenericIndex
elif isinstance(values, DatetimeColumn):
index_class_type = DatetimeIndex
elif isinstance(values, TimeDeltaColumn):
index_class_type = TimedeltaIndex
elif isinstance(values, StringColumn):
index_class_type = StringIndex
elif isinstance(values, CategoricalColumn):
index_class_type = CategoricalIndex
elif isinstance(values, (IntervalColumn, StructColumn)):
index_class_type = IntervalIndex
else:
raise NotImplementedError(
"Unsupported column type passed to "
f"create an Index: {type(values)}"
)
else:
index_class_type = cudf.MultiIndex
return index_class_type._from_data(data, name)
def _index_from_columns(
columns: List[cudf.core.column.ColumnBase], name: Any = no_default
):
"""Construct an index from ``columns``, with levels named 0, 1, 2..."""
return _index_from_data(dict(zip(range(len(columns)), columns)), name=name)
class RangeIndex(BaseIndex, BinaryOperand):
"""
Immutable Index implementing a monotonic integer range.
This is the default index type used by DataFrame and Series
when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), or other range instance
stop : int (default: 0)
step : int (default: 1)
name : object, optional
Name to be stored in the index.
dtype : numpy dtype
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
Attributes
----------
start
stop
step
Methods
-------
to_numpy
to_arrow
Examples
--------
>>> import cudf
>>> cudf.RangeIndex(0, 10, 1, name="a")
RangeIndex(start=0, stop=10, step=1, name='a')
>>> cudf.RangeIndex(range(1, 10, 1), name="a")
RangeIndex(start=1, stop=10, step=1, name='a')
"""
_VALID_BINARY_OPERATIONS = BinaryOperand._SUPPORTED_BINARY_OPERATIONS
_range: range
@_cudf_nvtx_annotate
def __init__(
self, start, stop=None, step=1, dtype=None, copy=False, name=None
):
if step == 0:
raise ValueError("Step must not be zero.")
if not cudf.api.types.is_hashable(name):
raise ValueError("Name must be a hashable value.")
if dtype is not None and not is_signed_integer_dtype(dtype):
raise ValueError(f"{dtype=} must be a signed integer type")
if isinstance(start, range):
therange = start
start = therange.start
stop = therange.stop
step = therange.step
if stop is None:
start, stop = 0, start
if not is_integer(start):
raise TypeError(
f"start must be an integer, not {type(start).__name__}"
)
self._start = int(start)
if not is_integer(stop):
raise TypeError(
f"stop must be an integer, not {type(stop).__name__}"
)
self._stop = int(stop)
if step is not None:
if not is_integer(step):
raise TypeError(
f"step must be an integer, not {type(step).__name__}"
)
self._step = int(step)
else:
self._step = 1
self._index = None
self._name = name
self._range = range(self._start, self._stop, self._step)
# _end is the actual last element of RangeIndex,
# whereas _stop is an upper bound.
self._end = self._start + self._step * (len(self._range) - 1)
def _copy_type_metadata(
self: RangeIndex, other: RangeIndex, *, override_dtypes=None
) -> RangeIndex:
# There is no metadata to be copied for RangeIndex since it does not
# have an underlying column.
return self
def searchsorted(
self,
value: int,
side: str = "left",
ascending: bool = True,
na_position: str = "last",
):
assert (len(self) <= 1) or (
ascending == (self._step > 0)
), "Invalid ascending flag"
return search_range(value, self.as_range, side=side)
@property # type: ignore
@_cudf_nvtx_annotate
def name(self):
return self._name
@name.setter # type: ignore
@_cudf_nvtx_annotate
def name(self, value):
self._name = value
@property # type: ignore
@_cudf_nvtx_annotate
def start(self):
"""
The value of the `start` parameter (0 if this was not supplied).
"""
return self._start
@property # type: ignore
@_cudf_nvtx_annotate
def stop(self):
"""
The value of the stop parameter.
"""
return self._stop
@property # type: ignore
@_cudf_nvtx_annotate
def step(self):
"""
The value of the step parameter.
"""
return self._step
@property # type: ignore
@_cudf_nvtx_annotate
def _num_rows(self):
return len(self)
@cached_property # type: ignore
@_cudf_nvtx_annotate
def _values(self):
if len(self) > 0:
return column.arange(
self._start, self._stop, self._step, dtype=self.dtype
)
else:
return column.column_empty(0, masked=False, dtype=self.dtype)
def _clean_nulls_from_index(self):
return self
def _is_numeric(self):
return True
def _is_boolean(self):
return False
def _is_integer(self):
return True
def _is_floating(self):
return False
def _is_object(self):
return False
def _is_categorical(self):
return False
def _is_interval(self):
return False
@property # type: ignore
@_cudf_nvtx_annotate
def hasnans(self):
return False
@property # type: ignore
@_cudf_nvtx_annotate
def _data(self):
return cudf.core.column_accessor.ColumnAccessor(
{self.name: self._values}
)
@_cudf_nvtx_annotate
def __contains__(self, item):
if isinstance(item, bool) or not isinstance(
item, tuple(np.sctypes["int"] + np.sctypes["float"] + [int, float])
):
return False
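# Membership is delegated to the underlying range(), which is O(1);
# values that are not integral or that overflow int() are rejected.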
try:
int_item = int(item)
return int_item == item and int_item in self._range
except (ValueError, OverflowError):
return False
@_cudf_nvtx_annotate
def copy(self, name=None, deep=False, dtype=None, names=None):
"""
Make a copy of this object.
Parameters
----------
name : object optional (default: None), name of index
deep : Bool (default: False)
Ignored for RangeIndex
dtype : numpy dtype optional (default: None)
Target dtype for underlying range data
.. deprecated:: 23.02
The `dtype` parameter is deprecated and will be removed in
a future version of cudf. Use the `astype` method instead.
names : list-like optional (default: None)
Kept for compatibility with MultiIndex. Should not be used.
.. deprecated:: 23.04
The parameter `names` is deprecated and will be removed in
a future version of cudf. Use the `name` parameter instead.
Returns
-------
New RangeIndex instance with the same range.
"""
if dtype is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"parameter dtype is deprecated and will be removed in a "
"future version. Use the astype method instead.",
FutureWarning,
)
if names is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"parameter names is deprecated and will be removed in a "
"future version. Use the name parameter instead.",
FutureWarning,
)
dtype = self.dtype if dtype is None else dtype
if not np.issubdtype(dtype, np.signedinteger):
raise ValueError(f"Expected Signed Integer Type, Got {dtype}")
name = self.name if name is None else name
return RangeIndex(
start=self._start, stop=self._stop, step=self._step, name=name
)
@_cudf_nvtx_annotate
def astype(self, dtype, copy: bool = True):
if is_dtype_equal(dtype, self.dtype):
return self
return self._as_int_index().astype(dtype, copy=copy)
@_cudf_nvtx_annotate
def drop_duplicates(self, keep="first"):
return self
@_cudf_nvtx_annotate
def duplicated(self, keep="first"):
return cupy.zeros(len(self), dtype=bool)
@_cudf_nvtx_annotate
def __repr__(self):
return (
f"{self.__class__.__name__}(start={self._start}, stop={self._stop}"
f", step={self._step}"
+ (
f", name={pd.io.formats.printing.default_pprint(self.name)}"
if self.name is not None
else ""
)
+ ")"
)
@_cudf_nvtx_annotate
def __len__(self):
return len(range(self._start, self._stop, self._step))
@_cudf_nvtx_annotate
def __getitem__(self, index):
if isinstance(index, slice):
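# Compose the requested slice with this range: positional indices are
# mapped through start + i * step, so the result stays a RangeIndex.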
sl_start, sl_stop, sl_step = index.indices(len(self))
lo = self._start + sl_start * self._step
hi = self._start + sl_stop * self._step
st = self._step * sl_step
return RangeIndex(start=lo, stop=hi, step=st, name=self._name)
elif isinstance(index, Number):
len_self = len(self)
if index < 0:
index += len_self
if not (0 <= index < len_self):
raise IndexError("Index out of bounds")
return self._start + index * self._step
return self._as_int_index()[index]
@_cudf_nvtx_annotate
def equals(self, other):
if isinstance(other, RangeIndex):
if (self._start, self._stop, self._step) == (
other._start,
other._stop,
other._step,
):
return True
return self._as_int_index().equals(other)
@_cudf_nvtx_annotate
def serialize(self):
header = {}
header["index_column"] = {}
# store metadata values of index separately
# We don't need to store the GPU buffer for RangeIndexes
# cuDF only needs to store start/stop and rehydrate
# during de-serialization
header["index_column"]["start"] = self._start
header["index_column"]["stop"] = self._stop
header["index_column"]["step"] = self._step
frames = []
header["name"] = pickle.dumps(self.name)
header["dtype"] = pickle.dumps(self.dtype)
header["type-serialized"] = pickle.dumps(type(self))
header["frame_count"] = 0
return header, frames
@classmethod
@_cudf_nvtx_annotate
def deserialize(cls, header, frames):
h = header["index_column"]
name = pickle.loads(header["name"])
start = h["start"]
stop = h["stop"]
step = h.get("step", 1)
return RangeIndex(start=start, stop=stop, step=step, name=name)
@property # type: ignore
@_cudf_nvtx_annotate
def dtype(self):
"""
`dtype` of the range of values in RangeIndex.
By default the dtype is a 64-bit signed integer. This is configurable
to 32-bit via `default_integer_bitwidth` in `cudf.options`.
"""
dtype = np.dtype(np.int64)
return _maybe_convert_to_default_type(dtype)
@_cudf_nvtx_annotate
def to_pandas(self, nullable=False):
return pd.RangeIndex(
start=self._start,
stop=self._stop,
step=self._step,
dtype=self.dtype,
name=self.name,
)
@property
def is_unique(self):
return True
@cached_property
def as_range(self):
return range(self._start, self._stop, self._step)
@cached_property # type: ignore
@_cudf_nvtx_annotate
def is_monotonic_increasing(self):
return self._step > 0 or len(self) <= 1
@cached_property # type: ignore
@_cudf_nvtx_annotate
def is_monotonic_decreasing(self):
return self._step < 0 or len(self) <= 1
@_cudf_nvtx_annotate
def memory_usage(self, deep=False):
if deep:
warnings.warn(
"The deep parameter is ignored and is only included "
"for pandas compatibility."
)
return 0
def unique(self):
# RangeIndex always has unique values
return self
@_cudf_nvtx_annotate
def __mul__(self, other):
# Multiplication by raw ints must return a RangeIndex to match pandas.
if isinstance(other, cudf.Scalar) and other.dtype.kind in "iu":
other = other.value
elif (
isinstance(other, (np.ndarray, cupy.ndarray))
and other.ndim == 0
and other.dtype.kind in "iu"
):
other = other.item()
if isinstance(other, (int, np.integer)):
return RangeIndex(
self.start * other, self.stop * other, self.step * other
)
return self._as_int_index().__mul__(other)
@_cudf_nvtx_annotate
def __rmul__(self, other):
# Multiplication is commutative.
return self.__mul__(other)
@_cudf_nvtx_annotate
def _as_int_index(self):
# Convert self to an integer index. This method is used to perform ops
# that are not defined directly on RangeIndex.
return _dtype_to_index[self.dtype.type]._from_data(self._data)
@_cudf_nvtx_annotate
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self._as_int_index().__array_ufunc__(
ufunc, method, *inputs, **kwargs
)
@_cudf_nvtx_annotate
def get_loc(self, key, method=None, tolerance=None):
# We should not actually remove this code until we have implemented the
# get_indexers method as an alternative, see
# https://github.com/rapidsai/cudf/issues/12312
if method is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
f"Passing method to {self.__class__.__name__}.get_loc is "
"deprecated and will raise in a future version.",
FutureWarning,
)
# Given an actual integer,
idx = (key - self._start) / self._step
idx_int_upper_bound = (self._stop - self._start) // self._step
if method is None:
if tolerance is not None:
raise ValueError(
"tolerance argument only valid if using pad, "
"backfill or nearest lookups"
)
if idx > idx_int_upper_bound or idx < 0:
raise KeyError(key)
idx_int = (key - self._start) // self._step
if idx_int != idx:
raise KeyError(key)
return idx_int
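# Inexact matches are resolved by the fill method: ffill/pad rounds
# down to the previous position, bfill/backfill rounds up, nearest
# rounds to the closest, and the result is clipped to the valid range.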
if (method == "ffill" and idx < 0) or (
method == "bfill" and idx > idx_int_upper_bound
):
raise KeyError(key)
round_method = {
"ffill": math.floor,
"bfill": math.ceil,
"nearest": round,
}[method]
if tolerance is not None and (abs(idx) * self._step > tolerance):
raise KeyError(key)
return np.clip(round_method(idx), 0, idx_int_upper_bound, dtype=int)
@_cudf_nvtx_annotate
def _union(self, other, sort=None):
if isinstance(other, RangeIndex):
# Variable suffixes are of the
# following notation: *_o -> other, *_s -> self,
# and *_r -> result
start_s, step_s = self.start, self.step
end_s = self._end
start_o, step_o = other.start, other.step
end_o = other._end
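# Normalize both ranges to positive steps; single-element ranges adopt
# a step consistent with the other input so the subset checks below
# are well defined.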
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
# Determine minimum start value of the result.
start_r = min(start_s, start_o)
# Determine maximum end value of the result.
end_r = max(end_s, end_o)
result = None
min_step = min(step_o, step_s)
if ((start_s - start_o) % min_step) == 0:
# Checking to determine other is a subset of self with
# equal step size.
if (
step_o == step_s
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
result = type(self)(start_r, end_r + step_s, step_s)
# Checking if self is a subset of other with unequal
# step sizes.
elif (
step_o % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
result = type(self)(start_r, end_r + step_s, step_s)
# Checking if other is a subset of self with unequal
# step sizes.
elif (
step_s % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
result = type(self)(start_r, end_r + step_o, step_o)
# Checking for the case where both steps are equal and even, and the
# two ranges are offset from each other by at most half a step. In
# that case the union needs the step size halved.
elif (
step_o == step_s
and (step_s % 2 == 0)
and (abs(start_s - start_o) <= step_s / 2)
and (abs(end_s - end_o) <= step_s / 2)
):
result = type(self)(start_r, end_r + step_s / 2, step_s / 2)
if result is not None:
if sort is None and not result.is_monotonic_increasing:
return result.sort_values()
else:
return result
# If all the above optimizations don't cater to the inputs,
# we materialize RangeIndexes into integer indexes and
# then perform `union`.
return self._try_reconstruct_range_index(
self._as_int_index()._union(other, sort=sort)
)
@_cudf_nvtx_annotate
def _intersection(self, other, sort=False):
if not isinstance(other, RangeIndex):
return self._try_reconstruct_range_index(
super()._intersection(other, sort=sort)
)
if not len(self) or not len(other):
return RangeIndex(0)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return RangeIndex(0)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = _extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return RangeIndex(0)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = (
first.start + (second.start - first.start) * first.step // gcd * s
)
new_step = first.step * second.step // gcd
no_steps = -(-(int_low - tmp_start) // abs(new_step))
new_start = tmp_start + abs(new_step) * no_steps
new_range = range(new_start, int_high, new_step)
new_index = RangeIndex(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return self._try_reconstruct_range_index(new_index)
@_cudf_nvtx_annotate
def difference(self, other, sort=None):
if isinstance(other, RangeIndex) and self.equals(other):
return self[:0]._get_reconciled_name_object(other)
return self._try_reconstruct_range_index(
super().difference(other, sort=sort)
)
def _try_reconstruct_range_index(self, index):
if isinstance(index, RangeIndex) or index.dtype.kind == "f":
return index
# Evenly spaced values can return a
# RangeIndex instead of a materialized Index.
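# A single nonzero difference between consecutive values means the
# index is an arithmetic progression and round-trips exactly through
# a range() object.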
if not index._column.has_nulls():
uniques = cupy.unique(cupy.diff(index.values))
if len(uniques) == 1 and uniques[0].get() != 0:
diff = uniques[0].get()
new_range = range(index[0], index[-1] + diff, diff)
return type(self)(new_range, name=index.name)
return index
def sort_values(
self,
return_indexer=False,
ascending=True,
na_position="last",
key=None,
):
if key is not None:
raise NotImplementedError("key parameter is not yet implemented.")
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
sorted_index = self
indexer = RangeIndex(range(len(self)))
if ascending:
if self.step < 0:
sorted_index = self[::-1]
indexer = indexer[::-1]
else:
if self.step > 0:
sorted_index = self[::-1]
indexer = indexer[::-1]
if return_indexer:
return sorted_index, indexer
else:
return sorted_index
@_cudf_nvtx_annotate
def _gather(self, gather_map, nullify=False, check_bounds=True):
gather_map = cudf.core.column.as_column(gather_map)
return _dtype_to_index[self.dtype.type]._from_columns(
[self._values.take(gather_map, nullify, check_bounds)], [self.name]
)
@_cudf_nvtx_annotate
def _apply_boolean_mask(self, boolean_mask):
return _dtype_to_index[self.dtype.type]._from_columns(
[self._values.apply_boolean_mask(boolean_mask)], [self.name]
)
def repeat(self, repeats, axis=None):
return self._as_int_index().repeat(repeats, axis)
def _split(self, splits):
return _dtype_to_index[self.dtype.type]._from_columns(
[self._as_int_index()._split(splits)], [self.name]
)
def _binaryop(self, other, op: str):
# TODO: certain binops don't require materializing range index and
# could use some optimization.
return self._as_int_index()._binaryop(other, op=op)
def join(
self, other, how="left", level=None, return_indexers=False, sort=False
):
if how in {"left", "right"} or self.equals(other):
# pandas supports directly merging RangeIndex objects and can
# intelligently create RangeIndex outputs depending on the type of
# join. Hence falling back to performing a merge on pd.RangeIndex
# since the conversion is cheap.
if isinstance(other, RangeIndex):
result = self.to_pandas().join(
other.to_pandas(),
how=how,
level=level,
return_indexers=return_indexers,
sort=sort,
)
if return_indexers:
return (
cudf.from_pandas(result[0]), result[1], result[2]
)
else:
return cudf.from_pandas(result)
return self._as_int_index().join(
other, how, level, return_indexers, sort
)
@property # type: ignore
@_cudf_nvtx_annotate
def _column(self):
return self._as_int_index()._column
@property # type: ignore
@_cudf_nvtx_annotate
def _columns(self):
return self._as_int_index()._columns
@property # type: ignore
@_cudf_nvtx_annotate
def values_host(self):
return self.to_pandas().values
@_cudf_nvtx_annotate
def argsort(
self,
ascending=True,
na_position="last",
):
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
indices = cupy.arange(0, len(self))
if (ascending and self._step < 0) or (
not ascending and self._step > 0
):
indices = indices[::-1]
return indices
@_cudf_nvtx_annotate
def where(self, cond, other=None, inplace=False):
return self._as_int_index().where(cond, other, inplace)
@_cudf_nvtx_annotate
def to_numpy(self):
return self.values_host
@_cudf_nvtx_annotate
def to_arrow(self):
return self._as_int_index().to_arrow()
def __array__(self, dtype=None):
raise TypeError(
"Implicit conversion to a host NumPy array via __array__ is not "
"allowed, To explicitly construct a GPU matrix, consider using "
".to_cupy()\nTo explicitly construct a host matrix, consider "
"using .to_numpy()."
)
@_cudf_nvtx_annotate
def nunique(self):
return len(self)
@_cudf_nvtx_annotate
def isna(self):
return cupy.zeros(len(self), dtype=bool)
isnull = isna
@_cudf_nvtx_annotate
def notna(self):
return cupy.ones(len(self), dtype=bool)
notnull = notna
@_cudf_nvtx_annotate
def _minmax(self, meth: str):
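# An empty range has no min or max (return NaN). Otherwise the extremum
# is either the first or the last element, depending on the sign of the
# step.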
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (
meth == "max" and self.step < 0
):
return self.start
return self.start + self.step * no_steps
def min(self):
return self._minmax("min")
def max(self):
return self._minmax("max")
@property
def values(self):
return cupy.arange(self.start, self.stop, self.step)
def any(self):
return any(self._range)
def append(self, other):
result = self._as_int_index().append(other)
return self._try_reconstruct_range_index(result)
def _indices_of(self, value) -> cudf.core.column.NumericalColumn:
if isinstance(value, (bool, np.bool_)):
raise ValueError(
f"Cannot use {type(value).__name__} to get an index of a "
f"{type(self).__name__}."
)
try:
i = [self._range.index(value)]
except ValueError:
i = []
return as_column(i, dtype=size_type_dtype)
def isin(self, values):
if is_scalar(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a {type(values).__name__}"
)
return self._values.isin(values).values
def __neg__(self):
return -self._as_int_index()
def __pos__(self):
return +self._as_int_index()
def __abs__(self):
return abs(self._as_int_index())
@_warn_no_dask_cudf
def __dask_tokenize__(self):
return (type(self), self.start, self.stop, self.step)
class GenericIndex(SingleColumnFrame, BaseIndex):
"""
An array of orderable values that represent the indices of another Column
Attributes
----------
_values: A Column object
name: A string
Parameters
----------
data : Column
The Column of data for this index
name : str optional
The name of the Index. If not provided, the Index adopts the value
Column's name. Otherwise if this name is different from the value
Column's, the data Column will be cloned to adopt this name.
"""
@_cudf_nvtx_annotate
def __init__(self, data, **kwargs):
kwargs = _setdefault_name(data, **kwargs)
# normalize the input
if isinstance(data, cudf.Series):
data = data._column
elif isinstance(data, column.ColumnBase):
data = data
else:
if isinstance(data, (list, tuple)):
if len(data) == 0:
data = np.asarray([], dtype="int64")
else:
data = np.asarray(data)
data = column.as_column(data)
assert isinstance(data, (NumericalColumn, StringColumn))
name = kwargs.get("name")
super().__init__({name: data})
@_cudf_nvtx_annotate
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
ret = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
if ret is not None:
return ret
# Attempt to dispatch all other functions to cupy.
cupy_func = getattr(cupy, ufunc.__name__)
if cupy_func:
if ufunc.nin == 2:
other = inputs[self is inputs[0]]
inputs = self._make_operands_for_binop(other)
else:
inputs = {
name: (col, None, False, None)
for name, col in self._data.items()
}
data = self._apply_cupy_ufunc_to_operands(
ufunc, cupy_func, inputs, **kwargs
)
out = [_index_from_data(out) for out in data]
# pandas returns numpy arrays when the outputs are boolean.
for i, o in enumerate(out):
# We explicitly _do not_ use isinstance here: we want only
# boolean GenericIndexes, not dtype-specific subclasses.
if type(o) is GenericIndex and o.dtype.kind == "b":
out[i] = o.values
return out[0] if ufunc.nout == 1 else tuple(out)
return NotImplemented
@classmethod
@_cudf_nvtx_annotate
def _from_data(
cls, data: MutableMapping, name: Any = no_default
) -> GenericIndex:
out = super()._from_data(data=data)
if name is not no_default:
out.name = name
return out
def _binaryop(
self,
other: Frame,
op: str,
fill_value: Any = None,
*args,
**kwargs,
) -> SingleColumnFrame:
reflect, op = self._check_reflected_op(op)
operands = self._make_operands_for_binop(other, fill_value, reflect)
if operands is NotImplemented:
return NotImplemented
binop_result = self._colwise_binop(operands, op)
if isinstance(other, cudf.Series):
ret = other._from_data_like_self(binop_result)
other_name = other.name
else:
ret = _index_from_data(binop_result)
other_name = getattr(other, "name", self.name)
ret.name = (
self.name
if cudf.utils.utils._is_same_name(self.name, other_name)
else None
)
# pandas returns numpy arrays when the outputs are boolean. We
# explicitly _do not_ use isinstance here: we want only boolean
# GenericIndexes, not dtype-specific subclasses.
if (
isinstance(ret, (GenericIndex, cudf.Series))
and ret.dtype.kind == "b"
):
if ret._column.has_nulls():
ret = ret.fillna(op == "__ne__")
return ret.values
return ret
# Override just to make mypy happy.
@_cudf_nvtx_annotate
def _copy_type_metadata(
self: GenericIndex, other: GenericIndex, *, override_dtypes=None
) -> GenericIndex:
return super()._copy_type_metadata(
other, override_dtypes=override_dtypes
)
@property # type: ignore
@_cudf_nvtx_annotate
def _values(self):
return self._column
@classmethod
@_cudf_nvtx_annotate
def _concat(cls, objs):
non_empties = [index for index in objs if len(index)]
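# Empty inputs are dropped up front. If every remaining input is a
# RangeIndex the result may stay a RangeIndex; otherwise the
# underlying columns are concatenated into a materialized index.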
if all(isinstance(obj, RangeIndex) for obj in non_empties):
result = _concat_range_index(non_empties)
else:
data = concat_columns([o._values for o in non_empties])
result = as_index(data)
names = {obj.name for obj in objs}
if len(names) == 1:
name = names.pop()
else:
name = None
result.name = name
return result
@_cudf_nvtx_annotate
def memory_usage(self, deep=False):
return self._column.memory_usage
@cached_property # type: ignore
@_cudf_nvtx_annotate
def is_unique(self):
return self._column.is_unique
@_cudf_nvtx_annotate
def equals(self, other):
if (
other is None
or not isinstance(other, BaseIndex)
or len(self) != len(other)
):
return False
check_dtypes = False
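# When exactly one side is categorical, cast the other side to that
# dtype and ask the column comparison to also verify dtypes.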
self_is_categorical = isinstance(self, CategoricalIndex)
other_is_categorical = isinstance(other, CategoricalIndex)
if self_is_categorical and not other_is_categorical:
other = other.astype(self.dtype)
check_dtypes = True
elif other_is_categorical and not self_is_categorical:
self = self.astype(other.dtype)
check_dtypes = True
try:
return self._column.equals(
other._column, check_dtypes=check_dtypes
)
except TypeError:
return False
@_cudf_nvtx_annotate
def copy(self, name=None, deep=False, dtype=None, names=None):
"""
Make a copy of this object.
Parameters
----------
name : object, default None
Name of index, use original name when None
deep : bool, default False
Make a deep copy of the data.
With ``deep=False`` the original data is used
dtype : numpy dtype, default None
Target datatype to cast into, use original dtype when None
.. deprecated:: 23.02
The `dtype` parameter is deprecated and will be removed in
a future version of cudf. Use the `astype` method instead.
names : list-like, default None
Kept for compatibility with MultiIndex. Should not be used.
.. deprecated:: 23.04
The parameter `names` is deprecated and will be removed in
a future version of cudf. Use the `name` parameter instead.
Returns
-------
New index instance, cast to the new dtype
"""
if dtype is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"parameter dtype is deprecated and will be removed in a "
"future version. Use the astype method instead.",
FutureWarning,
)
if names is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"parameter names is deprecated and will be removed in a "
"future version. Use the name parameter instead.",
FutureWarning,
)
dtype = self.dtype if dtype is None else dtype
name = self.name if name is None else name
col = self._values.astype(dtype)
return _index_from_data({name: col.copy(True) if deep else col})
@_cudf_nvtx_annotate
def astype(self, dtype, copy: bool = True):
return _index_from_data(super().astype({self.name: dtype}, copy))
@_cudf_nvtx_annotate
def get_loc(self, key, method=None, tolerance=None):
"""Get integer location, slice or boolean mask for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'fill', 'backfill'/'bfill', 'nearest'}, optional
- default: exact matches only.
- pad / ffill: find the PREVIOUS index value if no exact match.
- backfill / bfill: use NEXT index value if no exact match.
- nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index
value.
tolerance : int or float, optional
Maximum distance from index value for inexact matches. The value
of the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
Returns
-------
int or slice or boolean mask
- If result is unique, return integer index
- If index is monotonic, loc is returned as a slice object
- Otherwise, a boolean mask is returned
Examples
--------
>>> unique_index = cudf.Index(list('abc'))
>>> unique_index.get_loc('b')
1
>>> monotonic_index = cudf.Index(list('abbc'))
>>> monotonic_index.get_loc('b')
slice(1, 3, None)
>>> non_monotonic_index = cudf.Index(list('abcb'))
>>> non_monotonic_index.get_loc('b')
array([False, True, False, True])
>>> numeric_unique_index = cudf.Index([1, 2, 3])
>>> numeric_unique_index.get_loc(3)
2
"""
# We should not actually remove this code until we have implemented the
# get_indexers method as an alternative, see
# https://github.com/rapidsai/cudf/issues/12312
if method is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
f"Passing method to {self.__class__.__name__}.get_loc is "
"deprecated and will raise in a future version.",
FutureWarning,
)
if tolerance is not None:
raise NotImplementedError(
"Parameter tolerance is not supported yet."
)
if method not in {
None,
"ffill",
"bfill",
"pad",
"backfill",
"nearest",
}:
raise ValueError(
f"Invalid fill method. Expecting pad (ffill), backfill (bfill)"
f" or nearest. Got {method}"
)
is_sorted = (
self.is_monotonic_increasing or self.is_monotonic_decreasing
)
if not is_sorted and method is not None:
raise ValueError(
"index must be monotonic increasing or decreasing if `method`"
"is specified."
)
key_as_table = cudf.core.frame.Frame(
{"None": as_column(key, length=1)}
)
lower_bound, upper_bound, sort_inds = _lexsorted_equal_range(
self, key_as_table, is_sorted
)
if lower_bound == upper_bound:
# Key not found, apply method
if method in ("pad", "ffill"):
if lower_bound == 0:
raise KeyError(key)
return lower_bound - 1
elif method in ("backfill", "bfill"):
if lower_bound == self._data.nrows:
raise KeyError(key)
return lower_bound
elif method == "nearest":
if lower_bound == self._data.nrows:
return lower_bound - 1
elif lower_bound == 0:
return 0
lower_val = self._column.element_indexing(lower_bound - 1)
upper_val = self._column.element_indexing(lower_bound)
return (
lower_bound - 1
if abs(lower_val - key) < abs(upper_val - key)
else lower_bound
)
else:
raise KeyError(key)
if lower_bound + 1 == upper_bound:
# Search result is unique, return int.
return (
lower_bound
if is_sorted
else sort_inds.element_indexing(lower_bound)
)
if is_sorted:
# In monotonic index, lex search result is continuous. A slice for
# the range is returned.
return slice(lower_bound, upper_bound)
# Not sorted and not unique. Return a boolean mask
mask = cupy.full(self._data.nrows, False)
true_inds = sort_inds.slice(lower_bound, upper_bound).values
mask[true_inds] = True
return mask
@_cudf_nvtx_annotate
def __repr__(self):
max_seq_items = get_option("max_seq_items") or len(self)
mr = 0
if 2 * max_seq_items < len(self):
mr = max_seq_items + 1
if len(self) > mr and mr != 0:
top = self[0:mr]
bottom = self[-1 * mr :]
preprocess = cudf.concat([top, bottom])
else:
preprocess = self
# TODO: Change below usages accordingly to
# utilize `Index.to_string` once it is implemented
# related issue : https://github.com/pandas-dev/pandas/issues/35389
if isinstance(preprocess, CategoricalIndex):
if preprocess.categories.dtype.kind == "f":
output = repr(
preprocess.astype("str")
.to_pandas()
.astype(
dtype=pd.CategoricalDtype(
categories=preprocess.dtype.categories.astype(
"str"
).to_pandas(),
ordered=preprocess.dtype.ordered,
)
)
)
break_idx = output.find("ordered=")
output = (
output[:break_idx].replace("'", "") + output[break_idx:]
)
else:
output = repr(preprocess.to_pandas())
output = output.replace("nan", str(cudf.NA))
elif preprocess._values.nullable:
output = repr(self._clean_nulls_from_index().to_pandas())
if not isinstance(self, StringIndex):
# We should remove all the single quotes
# from the output due to the type-cast to
# object dtype happening above.
# Note : The replacing of single quotes has
# to happen only in case of non-StringIndex types,
# as we want to preserve single quotes in case
# of StringIndex and it is valid to have them.
output = output.replace("'", "")
else:
output = repr(preprocess.to_pandas())
# Fix and correct the class name of the output
# string by finding first occurrence of "(" in the output
index_class_split_index = output.find("(")
output = self.__class__.__name__ + output[index_class_split_index:]
lines = output.split("\n")
tmp_meta = lines[-1]
dtype_index = tmp_meta.rfind(" dtype=")
prior_to_dtype = tmp_meta[:dtype_index]
lines = lines[:-1]
lines.append(prior_to_dtype + " dtype='%s'" % self.dtype)
if self.name is not None:
lines[-1] = lines[-1] + ", name='%s'" % self.name
if "length" in tmp_meta:
lines[-1] = lines[-1] + ", length=%d)" % len(self)
else:
lines[-1] = lines[-1] + ")"
return "\n".join(lines)
@_cudf_nvtx_annotate
def __getitem__(self, index):
res = self._get_elements_from_column(index)
if isinstance(res, ColumnBase):
res = as_index(res)
res.name = self.name
return res
@property # type: ignore
@_cudf_nvtx_annotate
def dtype(self):
"""
`dtype` of the underlying values in GenericIndex.
"""
return self._values.dtype
@_cudf_nvtx_annotate
def isna(self):
return self._column.isnull().values
isnull = isna
@_cudf_nvtx_annotate
def notna(self):
return self._column.notnull().values
notnull = notna
def _is_numeric(self):
return False
def _is_boolean(self):
return True
def _is_integer(self):
return False
def _is_floating(self):
return False
def _is_object(self):
return False
def _is_categorical(self):
return False
def _is_interval(self):
return False
@property # type: ignore
@_cudf_nvtx_annotate
def hasnans(self):
return self._column.has_nulls(include_nan=True)
@_cudf_nvtx_annotate
def argsort(
self,
axis=0,
kind="quicksort",
order=None,
ascending=True,
na_position="last",
):
"""Return the integer indices that would sort the index.
Parameters
----------
axis : {0 or "index"}
Has no effect but is accepted for compatibility with numpy.
kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable
algorithms. Only quicksort is supported in cuDF.
order : None
Has no effect but is accepted for compatibility with numpy.
ascending : bool or list of bool, default True
If True, sort values in ascending order, otherwise descending.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs
at the end.
Returns
-------
cupy.ndarray: The indices sorted based on input.
""" # noqa: E501
return super().argsort(
axis=axis,
kind=kind,
order=order,
ascending=ascending,
na_position=na_position,
)
def repeat(self, repeats, axis=None):
return self._from_columns_like_self(
Frame._repeat([*self._columns], repeats, axis), self._column_names
)
@_cudf_nvtx_annotate
def where(self, cond, other=None, inplace=False):
result_col = super().where(cond, other, inplace)
return self._mimic_inplace(
_index_from_data({self.name: result_col}),
inplace=inplace,
)
@property
def values(self):
return self._column.values
def __contains__(self, item):
return item in self._values
def _clean_nulls_from_index(self):
if self._values.has_nulls():
fill_value = (
str(cudf.NaT)
if isinstance(self, (DatetimeIndex, TimedeltaIndex))
else str(cudf.NA)
)
return cudf.Index(
self._values.astype("str").fillna(fill_value),
name=self.name,
)
return self
def any(self):
return self._values.any()
def to_pandas(self, nullable=False):
return pd.Index(
self._values.to_pandas(nullable=nullable), name=self.name
)
def append(self, other):
if is_list_like(other):
to_concat = [self]
for obj in other:
if not isinstance(obj, BaseIndex):
raise TypeError("all inputs must be Index")
to_concat.append(obj)
else:
this = self
other = cudf.Index(other)
if len(this) == 0 or len(other) == 0:
# we'll filter out empties later in ._concat
to_concat = [this, other]
else:
if is_mixed_with_object_dtype(this, other):
got_dtype = (
other.dtype
if this.dtype == cudf.dtype("object")
else this.dtype
)
raise TypeError(
f"cudf does not support appending an Index of "
f"dtype `{cudf.dtype('object')}` with an Index "
f"of dtype `{got_dtype}`, please type-cast "
f"either one of them to same dtypes."
)
if isinstance(self._values, cudf.core.column.NumericalColumn):
if self.dtype != other.dtype:
this, other = numeric_normalize_types(self, other)
to_concat = [this, other]
return self._concat(to_concat)
def unique(self):
return cudf.core.index._index_from_data(
{self.name: self._values.unique()}, name=self.name
)
def isin(self, values):
if is_scalar(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a {type(values).__name__}"
)
return self._values.isin(values).values
def _indices_of(self, value):
"""Return indices of value in index"""
return self._column.indices_of(value)
@cache
@_warn_no_dask_cudf
def __dask_tokenize__(self):
# We can use caching, because an index is immutable
return super().__dask_tokenize__()
class NumericIndex(GenericIndex):
"""Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Returns
-------
Index
"""
# Subclasses must define the dtype they are associated with.
_dtype: Union[None, Type[np.number]] = None
@_cudf_nvtx_annotate
def __init__(self, data=None, dtype=None, copy=False, name=None):
# Do not remove until pandas 2.0 support is added.
warnings.warn(
f"cudf.{self.__class__.__name__} is deprecated and will be "
"removed from cudf in a future version. Use cudf.Index with the "
"appropriate dtype instead.",
FutureWarning,
)
dtype = type(self)._dtype
if copy:
data = column.as_column(data, dtype=dtype).copy()
kwargs = _setdefault_name(data, name=name)
data = column.as_column(data, dtype=dtype)
super().__init__(data, **kwargs)
def _is_numeric(self):
return True
def _is_boolean(self):
return False
def _is_integer(self):
return True
def _is_floating(self):
return False
def _is_object(self):
return False
def _is_categorical(self):
return False
def _is_interval(self):
return False
class Int8Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
Int8Index is a special case of Index with purely
integer(``int8``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
Int8Index
"""
_dtype = np.int8
class Int16Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
Int16Index is a special case of Index with purely
integer(``int16``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
Int16Index
"""
_dtype = np.int16
class Int32Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
Int32Index is a special case of Index with purely
integer(``int32``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
Int32Index
"""
_dtype = np.int32
class Int64Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
Int64Index is a special case of Index with purely
integer(``int64``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
Int64Index
"""
_dtype = np.int64
class UInt8Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
UInt8Index is a special case of Index with purely
integer(``uint8``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
UInt8Index
"""
_dtype = np.uint8
class UInt16Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
UInt16Index is a special case of Index with purely
integer(``uint16``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
UInt16Index
"""
_dtype = np.uint16
class UInt32Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
UInt32Index is a special case of Index with purely
integer(``uint32``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
UInt32Index
"""
_dtype = np.uint32
class UInt64Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
UInt64Index is a special case of Index with purely
integer(``uint64``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
UInt64Index
"""
_dtype = np.uint64
class Float32Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
Float32Index is a special case of Index with purely
float(``float32``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
Float32Index
"""
_dtype = np.float32
def _is_integer(self):
return False
def _is_floating(self):
return True
class Float64Index(NumericIndex):
"""
Immutable, ordered and sliceable sequence of labels.
The basic object storing row labels for all cuDF objects.
Float64Index is a special case of Index with purely
float(``float64``) labels.
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype,
but not used.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
Attributes
----------
None
Methods
-------
None
Returns
-------
Float64Index
"""
_dtype = np.float64
def _is_integer(self):
return False
def _is_floating(self):
return True
class DatetimeIndex(GenericIndex):
"""
Immutable, ordered and sliceable sequence of datetime64 data,
represented internally as int64.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with.
copy : bool
Make a copy of input.
freq : str, optional
This is not yet supported
tz : pytz.timezone or dateutil.tz.tzfile
This is not yet supported
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
This is not yet supported
name : object
Name to be stored in the index.
dayfirst : bool, default False
If True, parse dates in data with the day first order.
This is not yet supported
yearfirst : bool, default False
If True parse dates in data with the year first order.
This is not yet supported
Attributes
----------
year
month
day
hour
minute
second
microsecond
nanosecond
date
time
dayofyear
day_of_year
weekday
quarter
freq
Methods
-------
ceil
floor
round
tz_convert
tz_localize
Returns
-------
DatetimeIndex
Examples
--------
>>> import cudf
>>> cudf.DatetimeIndex([1, 2, 3, 4], name="a")
DatetimeIndex(['1970-01-01 00:00:00.000000001',
'1970-01-01 00:00:00.000000002',
'1970-01-01 00:00:00.000000003',
'1970-01-01 00:00:00.000000004'],
dtype='datetime64[ns]', name='a')
"""
@_cudf_nvtx_annotate
def __init__(
self,
data=None,
freq=None,
tz=None,
normalize=False,
closed=None,
ambiguous="raise",
dayfirst=False,
yearfirst=False,
dtype=None,
copy=False,
name=None,
):
# We should be more strict about what we accept here, but that would
# require first working out all the semantics around pandas
# DatetimeIndex creation. For now just make sure we handle
# np.datetime64 arrays and then dispatch upstream.
if freq is not None:
raise NotImplementedError("Freq is not yet supported")
if tz is not None:
raise NotImplementedError("tz is not yet supported")
if normalize is not False:
raise NotImplementedError("normalize == True is not yet supported")
if closed is not None:
raise NotImplementedError("closed is not yet supported")
if ambiguous != "raise":
raise NotImplementedError("ambiguous is not yet supported")
if dayfirst is not False:
raise NotImplementedError("dayfirst == True is not yet supported")
if yearfirst is not False:
raise NotImplementedError("yearfirst == True is not yet supported")
valid_dtypes = tuple(
f"datetime64[{res}]" for res in ("s", "ms", "us", "ns")
)
if dtype is None:
# nanosecond default matches pandas
dtype = "datetime64[ns]"
elif dtype not in valid_dtypes:
raise TypeError("Invalid dtype")
kwargs = _setdefault_name(data, name=name)
data = column.as_column(data, dtype=dtype)
if copy:
data = data.copy()
super().__init__(data, **kwargs)
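# Illustrative sketch (not part of the original source): any of the
# second/milli/micro/nanosecond datetime64 dtypes is accepted, while the
# unsupported pandas keywords (freq, tz, normalize, ...) raise
# NotImplementedError. For example, the following is expected to build a
# second-resolution index:
# >>> cudf.DatetimeIndex(["2001-01-01", "2001-01-02"], dtype="datetime64[s]")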
def __getitem__(self, index):
value = super().__getitem__(index)
if cudf.get_option("mode.pandas_compatible") and isinstance(
value, np.datetime64
):
return pd.Timestamp(value)
return value
def searchsorted(
self,
value,
side: str = "left",
ascending: bool = True,
na_position: str = "last",
):
value = self.dtype.type(value)
return super().searchsorted(
value, side=side, ascending=ascending, na_position=na_position
)
@property # type: ignore
@_cudf_nvtx_annotate
def year(self):
"""
The year of the datetime.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> datetime_index = cudf.Index(pd.date_range("2000-01-01",
... periods=3, freq="Y"))
>>> datetime_index
DatetimeIndex(['2000-12-31', '2001-12-31', '2002-12-31'], dtype='datetime64[ns]')
>>> datetime_index.year
Int16Index([2000, 2001, 2002], dtype='int16')
""" # noqa: E501
return self._get_dt_field("year")
@property # type: ignore
@_cudf_nvtx_annotate
def month(self):
"""
The month as January=1, December=12.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> datetime_index = cudf.Index(pd.date_range("2000-01-01",
... periods=3, freq="M"))
>>> datetime_index
DatetimeIndex(['2000-01-31', '2000-02-29', '2000-03-31'], dtype='datetime64[ns]')
>>> datetime_index.month
Int16Index([1, 2, 3], dtype='int16')
""" # noqa: E501
return self._get_dt_field("month")
@property # type: ignore
@_cudf_nvtx_annotate
def day(self):
"""
The day of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2000-01-01",
... periods=3, freq="D"))
>>> datetime_index
DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'], dtype='datetime64[ns]')
>>> datetime_index.day
Int16Index([1, 2, 3], dtype='int16')
""" # noqa: E501
return self._get_dt_field("day")
@property # type: ignore
@_cudf_nvtx_annotate
def hour(self):
"""
The hours of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2000-01-01",
... periods=3, freq="h"))
>>> datetime_index
DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 01:00:00',
'2000-01-01 02:00:00'],
dtype='datetime64[ns]')
>>> datetime_index.hour
Int16Index([0, 1, 2], dtype='int16')
"""
return self._get_dt_field("hour")
@property # type: ignore
@_cudf_nvtx_annotate
def minute(self):
"""
The minutes of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2000-01-01",
... periods=3, freq="T"))
>>> datetime_index
DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:01:00',
'2000-01-01 00:02:00'],
dtype='datetime64[ns]')
>>> datetime_index.minute
Int16Index([0, 1, 2], dtype='int16')
"""
return self._get_dt_field("minute")
@property # type: ignore
@_cudf_nvtx_annotate
def second(self):
"""
The seconds of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2000-01-01",
... periods=3, freq="s"))
>>> datetime_index
DatetimeIndex(['2000-01-01 00:00:00', '2000-01-01 00:00:01',
'2000-01-01 00:00:02'],
dtype='datetime64[ns]')
>>> datetime_index.second
Int16Index([0, 1, 2], dtype='int16')
"""
return self._get_dt_field("second")
@property # type: ignore
@_cudf_nvtx_annotate
def microsecond(self):
"""
The microseconds of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2000-01-01",
... periods=3, freq="us"))
>>> datetime_index
DatetimeIndex([ '2000-01-01 00:00:00', '2000-01-01 00:00:00.000001',
'2000-01-01 00:00:00.000002'],
dtype='datetime64[ns]')
>>> datetime_index.microsecond
Int32Index([0, 1, 2], dtype='int32')
""" # noqa: E501
return as_index(
(
# Need to manually promote the column to int32 because the
# pandas-matching binop behaviour would otherwise make this
# __mul__ return an int16 column, which could overflow.
self._values.get_dt_field("millisecond").astype("int32")
* cudf.Scalar(1000, dtype="int32")
)
+ self._values.get_dt_field("microsecond"),
name=self.name,
)
@property # type: ignore
@_cudf_nvtx_annotate
def nanosecond(self):
"""
The nanoseconds of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2000-01-01",
... periods=3, freq="ns"))
>>> datetime_index
DatetimeIndex([ '2000-01-01 00:00:00',
'2000-01-01 00:00:00.000000001',
'2000-01-01 00:00:00.000000002'],
dtype='datetime64[ns]')
>>> datetime_index.nanosecond
Int16Index([0, 1, 2], dtype='int16')
"""
return self._get_dt_field("nanosecond")
@property # type: ignore
@_cudf_nvtx_annotate
def weekday(self):
"""
The day of the week with Monday=0, Sunday=6.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2016-12-31",
... "2017-01-08", freq="D"))
>>> datetime_index
DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03',
'2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07',
'2017-01-08'],
dtype='datetime64[ns]')
>>> datetime_index.weekday
Int16Index([5, 6, 0, 1, 2, 3, 4, 5, 6], dtype='int16')
"""
return self._get_dt_field("weekday")
@property # type: ignore
@_cudf_nvtx_annotate
def dayofweek(self):
"""
The day of the week with Monday=0, Sunday=6.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2016-12-31",
... "2017-01-08", freq="D"))
>>> datetime_index
DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03',
'2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07',
'2017-01-08'],
dtype='datetime64[ns]')
>>> datetime_index.dayofweek
Int16Index([5, 6, 0, 1, 2, 3, 4, 5, 6], dtype='int16')
"""
return self._get_dt_field("weekday")
@property # type: ignore
@_cudf_nvtx_annotate
def dayofyear(self):
"""
The day of the year, from 1-365 in non-leap years and
from 1-366 in leap years.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2016-12-31",
... "2017-01-08", freq="D"))
>>> datetime_index
DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03',
'2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07',
'2017-01-08'],
dtype='datetime64[ns]')
>>> datetime_index.dayofyear
Int16Index([366, 1, 2, 3, 4, 5, 6, 7, 8], dtype='int16')
"""
return self._get_dt_field("day_of_year")
@property # type: ignore
@_cudf_nvtx_annotate
def day_of_year(self):
"""
The day of the year, from 1-365 in non-leap years and
from 1-366 in leap years.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_index = cudf.Index(pd.date_range("2016-12-31",
... "2017-01-08", freq="D"))
>>> datetime_index
DatetimeIndex(['2016-12-31', '2017-01-01', '2017-01-02', '2017-01-03',
'2017-01-04', '2017-01-05', '2017-01-06', '2017-01-07',
'2017-01-08'],
dtype='datetime64[ns]')
>>> datetime_index.day_of_year
Int16Index([366, 1, 2, 3, 4, 5, 6, 7, 8], dtype='int16')
"""
return self._get_dt_field("day_of_year")
@property # type: ignore
@_cudf_nvtx_annotate
def is_leap_year(self):
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year with 366 days (instead of 365), including the
29th of February as an intercalary day. Leap years are years that are
multiples of four, with the exception of years divisible by 100 but not
by 400.
Returns
-------
ndarray
Booleans indicating if dates belong to a leap year.
"""
res = is_leap_year(self._values).fillna(False)
return cupy.asarray(res)
@property # type: ignore
@_cudf_nvtx_annotate
def quarter(self):
"""
Integer indicator for which quarter of the year the date belongs in.
There are 4 quarters in a year. With the first quarter being from
January - March, second quarter being April - June, third quarter
being July - September and fourth quarter being October - December.
Returns
-------
Int8Index
Integer indicating which quarter the date belongs to.
Examples
--------
>>> import cudf
>>> gIndex = cudf.DatetimeIndex(["2020-05-31 08:00:00",
... "1999-12-31 18:40:00"])
>>> gIndex.quarter
Int8Index([2, 4], dtype='int8')
"""
res = extract_quarter(self._values)
return Index(res, dtype="int8")
@_cudf_nvtx_annotate
def isocalendar(self):
"""
Returns a DataFrame with the year, week, and day
calculated according to the ISO 8601 standard.
Returns
-------
DataFrame
with columns year, week and day
Examples
--------
>>> gIndex = cudf.DatetimeIndex(["2020-05-31 08:00:00",
... "1999-12-31 18:40:00"])
>>> gIndex.isocalendar()
year week day
2020-05-31 08:00:00 2020 22 7
1999-12-31 18:40:00 1999 52 5
"""
return cudf.core.tools.datetimes._to_iso_calendar(self)
@_cudf_nvtx_annotate
def to_pandas(self, nullable=False):
# TODO: no need to convert to nanos with Pandas 2.x
if isinstance(self.dtype, pd.DatetimeTZDtype):
nanos = self._values.astype(
pd.DatetimeTZDtype("ns", self.dtype.tz)
)
else:
nanos = self._values.astype("datetime64[ns]")
return pd.DatetimeIndex(nanos.to_pandas(), name=self.name)
@_cudf_nvtx_annotate
def _get_dt_field(self, field):
out_column = self._values.get_dt_field(field)
# column.column_empty_like always returns a Column object,
# but we need a NumericalColumn for GenericIndex.
# How should this be handled?
out_column = column.build_column(
data=out_column.base_data,
dtype=out_column.dtype,
mask=out_column.base_mask,
offset=out_column.offset,
)
return as_index(out_column, name=self.name)
def _is_boolean(self):
return False
@_cudf_nvtx_annotate
def ceil(self, freq):
"""
Perform ceil operation on the data to the specified freq.
Parameters
----------
freq : str
One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"].
Must be a fixed frequency like 'S' (second) not 'ME' (month end).
See `frequency aliases <https://pandas.pydata.org/docs/\
user_guide/timeseries.html#timeseries-offset-aliases>`__
for more details on these aliases.
Returns
-------
DatetimeIndex
Index of the same type for a DatetimeIndex
Examples
--------
>>> import cudf
>>> gIndex = cudf.DatetimeIndex([
... "2020-05-31 08:05:42",
... "1999-12-31 18:40:30",
... ])
>>> gIndex.ceil("T")
DatetimeIndex(['2020-05-31 08:06:00', '1999-12-31 18:41:00'], dtype='datetime64[ns]')
""" # noqa: E501
out_column = self._values.ceil(freq)
return self.__class__._from_data({self.name: out_column})
@_cudf_nvtx_annotate
def floor(self, freq):
"""
Perform floor operation on the data to the specified freq.
Parameters
----------
freq : str
One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"].
Must be a fixed frequency like 'S' (second) not 'ME' (month end).
See `frequency aliases <https://pandas.pydata.org/docs/\
user_guide/timeseries.html#timeseries-offset-aliases>`__
for more details on these aliases.
Returns
-------
DatetimeIndex
Index of the same type for a DatetimeIndex
Examples
--------
>>> import cudf
>>> gIndex = cudf.DatetimeIndex([
... "2020-05-31 08:59:59",
... "1999-12-31 18:44:59",
... ])
>>> gIndex.floor("T")
DatetimeIndex(['2020-05-31 08:59:00', '1999-12-31 18:44:00'], dtype='datetime64[ns]')
""" # noqa: E501
out_column = self._values.floor(freq)
return self.__class__._from_data({self.name: out_column})
@_cudf_nvtx_annotate
def round(self, freq):
"""
Perform round operation on the data to the specified freq.
Parameters
----------
freq : str
One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"].
Must be a fixed frequency like 'S' (second) not 'ME' (month end).
See `frequency aliases <https://pandas.pydata.org/docs/\
user_guide/timeseries.html#timeseries-offset-aliases>`__
for more details on these aliases.
Returns
-------
DatetimeIndex
Index containing rounded datetimes.
Examples
--------
>>> import cudf
>>> dt_idx = cudf.Index([
... "2001-01-01 00:04:45",
... "2001-01-01 00:04:58",
... "2001-01-01 00:05:04",
... ], dtype="datetime64[ns]")
>>> dt_idx
DatetimeIndex(['2001-01-01 00:04:45', '2001-01-01 00:04:58',
'2001-01-01 00:05:04'],
dtype='datetime64[ns]')
>>> dt_idx.round('H')
DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01'], dtype='datetime64[ns]')
>>> dt_idx.round('T')
DatetimeIndex(['2001-01-01 00:05:00', '2001-01-01 00:05:00', '2001-01-01 00:05:00'], dtype='datetime64[ns]')
""" # noqa: E501
out_column = self._values.round(freq)
return self.__class__._from_data({self.name: out_column})
def tz_localize(self, tz, ambiguous="NaT", nonexistent="NaT"):
"""
Localize timezone-naive data to timezone-aware data.
Parameters
----------
tz : str
Timezone to convert timestamps to.
Returns
-------
DatetimeIndex containing timezone aware timestamps.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> tz_naive = cudf.date_range('2018-03-01 09:00', periods=3, freq='D')
>>> tz_aware = tz_naive.tz_localize("America/New_York")
>>> tz_aware
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, America/New_York]')
Ambiguous or nonexistent datetimes are converted to NaT.
>>> s = cudf.to_datetime(cudf.Series(['2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.dt.tz_localize("CET")
0 2018-10-28 01:20:00.000000000
1 NaT
2 2018-10-28 03:46:00.000000000
dtype: datetime64[ns, CET]
Notes
-----
'NaT' is currently the only supported option for the
``ambiguous`` and ``nonexistent`` arguments. Any
ambiguous or nonexistent timestamps are converted
to 'NaT'.
"""
from cudf.core._internals.timezones import delocalize, localize
if tz is None:
result_col = delocalize(self._column)
else:
result_col = localize(self._column, tz, ambiguous, nonexistent)
return DatetimeIndex._from_data({self.name: result_col})
def tz_convert(self, tz):
"""
Convert tz-aware datetimes from one time zone to another.
Parameters
----------
tz : str
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index.
A `tz` of None will convert to UTC and remove the timezone
information.
Returns
-------
DatetimeIndex containing timestamps corresponding to the timezone
`tz`.
Examples
--------
>>> import cudf
>>> dti = cudf.date_range('2018-03-01 09:00', periods=3, freq='D')
>>> dti = dti.tz_localize("America/New_York")
>>> dti
DatetimeIndex(['2018-03-01 09:00:00-05:00',
'2018-03-02 09:00:00-05:00',
'2018-03-03 09:00:00-05:00'],
dtype='datetime64[ns, America/New_York]')
>>> dti.tz_convert("Europe/London")
DatetimeIndex(['2018-03-01 14:00:00+00:00',
'2018-03-02 14:00:00+00:00',
'2018-03-03 14:00:00+00:00'],
dtype='datetime64[ns, Europe/London]')
"""
from cudf.core._internals.timezones import convert
if tz is None:
result_col = self._column._utc_time
else:
result_col = convert(self._column, tz)
return DatetimeIndex._from_data({self.name: result_col})
class TimedeltaIndex(GenericIndex):
"""
Immutable, ordered and sliceable sequence of timedelta64 data,
represented internally as int64.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with.
unit : str, optional
This is not yet supported
copy : bool
Make a copy of input.
freq : str, optional
This is not yet supported
closed : str, optional
This is not yet supported
dtype : str or :class:`numpy.dtype`, optional
Data type for the output Index. If not specified, the
default dtype will be ``timedelta64[ns]``.
name : object
Name to be stored in the index.
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
None
Returns
-------
TimedeltaIndex
Examples
--------
>>> import cudf
>>> cudf.TimedeltaIndex([1132223, 2023232, 342234324, 4234324],
... dtype="timedelta64[ns]")
TimedeltaIndex(['0 days 00:00:00.001132223', '0 days 00:00:00.002023232',
'0 days 00:00:00.342234324', '0 days 00:00:00.004234324'],
dtype='timedelta64[ns]')
>>> cudf.TimedeltaIndex([1, 2, 3, 4], dtype="timedelta64[s]",
... name="delta-index")
TimedeltaIndex(['0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03',
'0 days 00:00:04'],
dtype='timedelta64[s]', name='delta-index')
"""
@_cudf_nvtx_annotate
def __init__(
self,
data=None,
unit=None,
freq=None,
closed=None,
dtype="timedelta64[ns]",
copy=False,
name=None,
):
if freq is not None:
raise NotImplementedError("freq is not yet supported")
if unit is not None:
raise NotImplementedError(
"unit is not yet supported; use the dtype parameter instead"
)
valid_dtypes = tuple(
f"timedelta64[{res}]" for res in ("s", "ms", "us", "ns")
)
if dtype not in valid_dtypes:
raise TypeError("Invalid dtype")
kwargs = _setdefault_name(data, name=name)
data = column.as_column(data, dtype=dtype)
if copy:
data = data.copy()
super().__init__(data, **kwargs)
def __getitem__(self, index):
value = super().__getitem__(index)
if cudf.get_option("mode.pandas_compatible") and isinstance(
value, np.timedelta64
):
return pd.Timedelta(value)
return value
@_cudf_nvtx_annotate
def to_pandas(self, nullable=False):
return pd.TimedeltaIndex(
self._values.to_pandas(),
name=self.name,
unit=self._values.time_unit,
)
@property # type: ignore
@_cudf_nvtx_annotate
def days(self):
"""
Number of days for each element.
"""
return as_index(arbitrary=self._values.days, name=self.name)
@property # type: ignore
@_cudf_nvtx_annotate
def seconds(self):
"""
Number of seconds (>= 0 and less than 1 day) for each element.
"""
return as_index(arbitrary=self._values.seconds, name=self.name)
@property # type: ignore
@_cudf_nvtx_annotate
def microseconds(self):
"""
Number of microseconds (>= 0 and less than 1 second) for each element.
"""
return as_index(arbitrary=self._values.microseconds, name=self.name)
@property # type: ignore
@_cudf_nvtx_annotate
def nanoseconds(self):
"""
Number of nanoseconds (>= 0 and less than 1 microsecond) for each
element.
"""
return as_index(arbitrary=self._values.nanoseconds, name=self.name)
@property # type: ignore
@_cudf_nvtx_annotate
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
"""
return self._values.components()
@property
def inferred_freq(self):
"""
Infers frequency of TimedeltaIndex.
Notes
-----
This property is currently not supported.
"""
raise NotImplementedError("inferred_freq is not yet supported")
def _is_boolean(self):
return False
class CategoricalIndex(GenericIndex):
"""
A categorical of orderable values that represent the indices of another
Column
Parameters
----------
data : array-like (1-dimensional)
The values of the categorical. If categories are given,
values not in categories will be replaced with None/NaN.
categories : list-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here (and also not in dtype),
they will be inferred from the data.
ordered : bool, optional
Whether or not this categorical is treated as an ordered categorical.
If not given here or in dtype, the resulting categorical will be
unordered.
dtype : CategoricalDtype or "category", optional
If CategoricalDtype, cannot be used together with categories or
ordered.
copy : bool, default False
Make a copy of input.
name : object, optional
Name to be stored in the index.
Attributes
----------
codes
categories
Methods
-------
equals
Returns
-------
CategoricalIndex
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> cudf.CategoricalIndex(
... data=[1, 2, 3, 4], categories=[1, 2], ordered=False, name="a")
CategoricalIndex([1, 2, <NA>, <NA>], categories=[1, 2], ordered=False, dtype='category', name='a')
>>> cudf.CategoricalIndex(
... data=[1, 2, 3, 4], dtype=pd.CategoricalDtype([1, 2, 3]), name="a")
CategoricalIndex([1, 2, 3, <NA>], categories=[1, 2, 3], ordered=False, dtype='category', name='a')
""" # noqa: E501
@_cudf_nvtx_annotate
def __init__(
self,
data=None,
categories=None,
ordered=None,
dtype=None,
copy=False,
name=None,
):
if isinstance(dtype, (pd.CategoricalDtype, cudf.CategoricalDtype)):
if categories is not None or ordered is not None:
raise ValueError(
"Cannot specify `categories` or "
"`ordered` together with `dtype`."
)
if copy:
data = column.as_column(data, dtype=dtype).copy(deep=True)
kwargs = _setdefault_name(data, name=name)
if isinstance(data, CategoricalColumn):
data = data
elif isinstance(data, pd.Series) and (
is_categorical_dtype(data.dtype)
):
codes_data = column.as_column(data.cat.codes.values)
data = column.build_categorical_column(
categories=data.cat.categories,
codes=codes_data,
ordered=data.cat.ordered,
)
elif isinstance(data, (pd.Categorical, pd.CategoricalIndex)):
codes_data = column.as_column(data.codes)
data = column.build_categorical_column(
categories=data.categories,
codes=codes_data,
ordered=data.ordered,
)
else:
data = column.as_column(
data, dtype="category" if dtype is None else dtype
)
# dtype has already been taken care of
dtype = None
if categories is not None:
data = data.set_categories(categories, ordered=ordered)
elif isinstance(dtype, (pd.CategoricalDtype, cudf.CategoricalDtype)):
data = data.set_categories(dtype.categories, ordered=ordered)
elif ordered is True and data.ordered is False:
data = data.as_ordered()
elif ordered is False and data.ordered is True:
data = data.as_unordered()
super().__init__(data, **kwargs)
@property # type: ignore
@_cudf_nvtx_annotate
def codes(self):
"""
The category codes of this categorical.
"""
return as_index(self._values.codes)
@property # type: ignore
@_cudf_nvtx_annotate
def categories(self):
"""
The categories of this categorical.
"""
return as_index(self._values.categories)
def _is_boolean(self):
return False
def _is_categorical(self):
return True
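# Illustrative sketch (not part of the original source): the ``codes`` and
# ``categories`` properties above expose the underlying encoding; exact
# integer dtypes of the codes may vary.
# >>> ci = cudf.CategoricalIndex(["a", "b", "a"])
# >>> ci.categories   # unique category labels, here ['a', 'b']
# >>> ci.codes        # per-element integer codes, here [0, 1, 0]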
@_cudf_nvtx_annotate
def interval_range(
start=None,
end=None,
periods=None,
freq=None,
name=None,
closed="right",
) -> "IntervalIndex":
"""
Returns a fixed frequency IntervalIndex.
Parameters
----------
start : numeric, default None
Left bound for generating intervals.
end : numeric , default None
Right bound for generating intervals.
periods : int, default None
Number of periods to generate
freq : numeric, default None
The length of each interval. Must be consistent
with the type of start and end
name : str, default None
Name of the resulting IntervalIndex.
closed : {"left", "right", "both", "neither"}, default "right"
Whether the intervals are closed on the left-side, right-side,
both or neither.
Returns
-------
IntervalIndex
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> cudf.interval_range(start=0,end=5)
IntervalIndex([(0, 0], (1, 1], (2, 2], (3, 3], (4, 4], (5, 5]],
...closed='right',dtype='interval')
>>> cudf.interval_range(start=0,end=10, freq=2,closed='left')
IntervalIndex([[0, 2), [2, 4), [4, 6), [6, 8), [8, 10)],
...closed='left',dtype='interval')
>>> cudf.interval_range(start=0,end=10, periods=3,closed='left')
...IntervalIndex([[0.0, 3.3333333333333335),
[3.3333333333333335, 6.666666666666667),
[6.666666666666667, 10.0)],
closed='left',
dtype='interval')
"""
nargs = sum(_ is not None for _ in (start, end, periods, freq))
# we need at least three of (start, end, periods, freq)
if nargs == 2 and freq is None:
freq = 1
nargs += 1
if nargs != 3:
raise ValueError(
"Of the four parameters: start, end, periods, and "
"freq, exactly three must be specified"
)
start = cudf.Scalar(start) if start is not None else start
end = cudf.Scalar(end) if end is not None else end
periods = cudf.Scalar(int(periods)) if periods is not None else periods
freq = cudf.Scalar(freq) if freq is not None else freq
if start is None:
start = end - freq * periods
elif freq is None:
quotient, remainder = divmod((end - start).value, periods.value)
if remainder:
freq = (end - start) / periods
else:
freq = cudf.Scalar(int(quotient))
elif periods is None:
periods = cudf.Scalar(int((end - start) / freq))
elif end is None:
end = start + periods * freq
if any(
not _is_non_decimal_numeric_dtype(x.dtype)
for x in (start, periods, freq, end)
):
raise ValueError("start, end, periods, freq must be numeric values.")
periods = periods.astype("int64")
common_dtype = find_common_type((start.dtype, freq.dtype, end.dtype))
start = start.astype(common_dtype)
freq = freq.astype(common_dtype)
bin_edges = sequence(
size=periods + 1,
init=start.device_value,
step=freq.device_value,
)
left_col = bin_edges.slice(0, len(bin_edges) - 1)
right_col = bin_edges.slice(1, len(bin_edges))
if len(right_col) == 0 or len(left_col) == 0:
dtype = IntervalDtype("int64", closed)
data = column.column_empty_like_same_mask(left_col, dtype)
return IntervalIndex(data, closed=closed)
interval_col = column.build_interval_column(
left_col, right_col, closed=closed
)
return IntervalIndex(interval_col)
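# Illustrative worked example (not part of the original source) of the
# parameter inference above: with start=0, end=10 and periods=5,
# divmod(10 - 0, 5) yields quotient 2 with remainder 0, so freq becomes the
# integer 2 and the generated bin edges are 0, 2, 4, 6, 8, 10.
# >>> cudf.interval_range(start=0, end=10, periods=5)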
class IntervalIndex(GenericIndex):
"""
Immutable index of intervals that are closed on the same side.
Parameters
----------
data : array-like (1-dimensional)
Array-like containing Interval objects from which to build the
IntervalIndex.
closed : {"left", "right", "both", "neither"}, default "right"
Whether the intervals are closed on the left-side, right-side,
both or neither.
dtype : dtype or None, default None
If None, dtype will be inferred.
copy : bool, default False
Copy the input data.
name : object, optional
Name to be stored in the index.
Attributes
----------
values
Methods
-------
from_breaks
get_loc
Returns
-------
IntervalIndex
"""
@_cudf_nvtx_annotate
def __init__(
self,
data,
closed=None,
dtype=None,
copy=False,
name=None,
):
if copy:
data = column.as_column(data, dtype=dtype).copy()
kwargs = _setdefault_name(data, name=name)
if closed is None:
closed = "right"
if isinstance(data, IntervalColumn):
data = data
elif isinstance(data, pd.Series) and (is_interval_dtype(data.dtype)):
data = column.as_column(data, data.dtype)
elif isinstance(data, (pd.Interval, pd.IntervalIndex)):
data = column.as_column(
data,
dtype=dtype,
)
elif len(data) == 0:
subtype = getattr(data, "dtype", "int64")
dtype = IntervalDtype(subtype, closed)
data = column.column_empty_like_same_mask(
column.as_column(data), dtype
)
else:
data = column.as_column(data)
data.dtype.closed = closed
self.closed = closed
super().__init__(data, **kwargs)
@_cudf_nvtx_annotate
def from_breaks(breaks, closed="right", name=None, copy=False, dtype=None):
"""
Construct an IntervalIndex from an array of splits.
Parameters
----------
breaks : array-like (1-dimensional)
Left and right bounds for each interval.
closed : {"left", "right", "both", "neither"}, default "right"
Whether the intervals are closed on the left-side, right-side,
both or neither.
copy : bool, default False
Copy the input data.
name : object, optional
Name to be stored in the index.
dtype : dtype or None, default None
If None, dtype will be inferred.
Returns
-------
IntervalIndex
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> cudf.IntervalIndex.from_breaks([0, 1, 2, 3])
IntervalIndex([(0, 1], (1, 2], (2, 3]], dtype='interval[int64, right]')
"""
if copy:
breaks = column.as_column(breaks, dtype=dtype).copy()
left_col = breaks[:-1:]
right_col = breaks[+1::]
interval_col = column.build_interval_column(
left_col, right_col, closed=closed
)
return IntervalIndex(interval_col, name=name)
def __getitem__(self, index):
raise NotImplementedError(
"Getting a scalar from an IntervalIndex is not yet supported"
)
def _is_interval(self):
return True
def _is_boolean(self):
return False
def _clean_nulls_from_index(self):
return self
class StringIndex(GenericIndex):
"""String defined indices into another Column
.. deprecated:: 23.06
`StringIndex` is deprecated, use `Index` instead.
Attributes
----------
_values: A StringColumn object or NDArray of strings
name: A string
"""
@_cudf_nvtx_annotate
def __init__(self, values, copy=False, **kwargs):
# Do not remove until pandas 2.0 support is added.
warnings.warn(
f"cudf.{self.__class__.__name__} is deprecated and will be "
"removed from cudf in a future version. Use cudf.Index with the "
"appropriate dtype instead.",
FutureWarning,
)
kwargs = _setdefault_name(values, **kwargs)
if isinstance(values, StringColumn):
values = values.copy(deep=copy)
elif isinstance(values, StringIndex):
values = values._values.copy(deep=copy)
else:
values = column.as_column(values, dtype="str")
if not is_string_dtype(values.dtype):
raise ValueError(
"Couldn't create StringIndex from passed in object"
)
super().__init__(values, **kwargs)
@_cudf_nvtx_annotate
def to_pandas(self, nullable=False):
return pd.Index(
self.to_numpy(na_value=None),
name=self.name,
dtype=pd.StringDtype() if nullable else "object",
)
@_cudf_nvtx_annotate
def __repr__(self):
return (
f"{self.__class__.__name__}({self._values.values_host},"
f" dtype='object'"
+ (
f", name={pd.io.formats.printing.default_pprint(self.name)}"
if self.name is not None
else ""
)
+ ")"
)
@copy_docstring(StringMethods) # type: ignore
@property
@_cudf_nvtx_annotate
def str(self):
return StringMethods(parent=self)
def _clean_nulls_from_index(self):
if self._values.has_nulls():
return self.fillna(str(cudf.NA))
else:
return self
def _is_boolean(self):
return False
def _is_object(self):
return True
@_cudf_nvtx_annotate
def as_index(
arbitrary, nan_as_null=None, copy=False, name=no_default, dtype=None
) -> BaseIndex:
"""Create an Index from an arbitrary object
Parameters
----------
arbitrary : object
Object to construct the Index from. See *Notes*.
nan_as_null : bool, optional, default None
If None (default), treats NaN values in arbitrary as null.
If True, combines the mask and NaNs to
form a new validity mask. If False, leaves NaN values as is.
copy : bool, default False
If True, make copies of `arbitrary` if possible and create an
Index out of it.
If False, `arbitrary` will be shallow-copied if it is a
device object when constructing the Index.
name : object, optional
Name of the index being created, by default it is `None`.
dtype : optional
Optionally typecast the constructed Index to the given
dtype.
Returns
-------
result : subclass of Index
- CategoricalIndex for Categorical input.
- DatetimeIndex for Datetime input.
- GenericIndex for all other inputs.
Notes
-----
Currently supported inputs are:
* ``Column``
* ``Buffer``
* ``Series``
* ``Index``
* numba device array
* numpy array
* pyarrow array
* pandas.Categorical
"""
if name is no_default:
name = getattr(arbitrary, "name", None)
if isinstance(arbitrary, cudf.MultiIndex):
if dtype is not None:
raise TypeError(
"dtype must be `None` for inputs of type: "
f"{type(arbitrary).__name__}, found {dtype=} "
)
return arbitrary.copy(deep=copy)
elif isinstance(arbitrary, BaseIndex):
idx = arbitrary.copy(deep=copy).rename(name)
elif isinstance(arbitrary, ColumnBase):
idx = _index_from_data({name: arbitrary})
elif isinstance(arbitrary, cudf.Series):
return as_index(
arbitrary._column,
nan_as_null=nan_as_null,
copy=copy,
name=name,
dtype=dtype,
)
elif isinstance(arbitrary, (pd.RangeIndex, range)):
idx = RangeIndex(
start=arbitrary.start,
stop=arbitrary.stop,
step=arbitrary.step,
name=name,
)
elif isinstance(arbitrary, pd.MultiIndex):
if dtype is not None:
raise TypeError(
"dtype must be `None` for inputs of type: "
f"{type(arbitrary).__name__}, found {dtype=} "
)
return cudf.MultiIndex.from_pandas(
arbitrary.copy(deep=copy), nan_as_null=nan_as_null
)
elif isinstance(arbitrary, cudf.DataFrame) or is_scalar(arbitrary):
raise ValueError("Index data must be 1-dimensional and list-like")
else:
return as_index(
column.as_column(arbitrary, dtype=dtype, nan_as_null=nan_as_null),
copy=copy,
name=name,
dtype=dtype,
)
if dtype is not None:
idx = idx.astype(dtype)
return idx
_dtype_to_index: Dict[Any, Type[NumericIndex]] = {
np.int8: Int8Index,
np.int16: Int16Index,
np.int32: Int32Index,
np.int64: Int64Index,
np.uint8: UInt8Index,
np.uint16: UInt16Index,
np.uint32: UInt32Index,
np.uint64: UInt64Index,
np.float32: Float32Index,
np.float64: Float64Index,
}
def _setdefault_name(values, **kwargs):
if kwargs.get("name") is None:
kwargs["name"] = getattr(values, "name", None)
return kwargs
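# Illustrative sketch (not part of the original source): _setdefault_name
# only fills in a missing name from the input object, so
# _setdefault_name(cudf.Series([1], name="x")) returns {"name": "x"},
# while _setdefault_name(values, name="y") leaves the explicit name
# untouched and returns {"name": "y"}.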
class IndexMeta(type):
"""Custom metaclass for Index that overrides instance/subclass tests."""
def __instancecheck__(self, instance):
return isinstance(instance, BaseIndex)
def __subclasscheck__(self, subclass):
return issubclass(subclass, BaseIndex)
class Index(BaseIndex, metaclass=IndexMeta):
"""The basic object storing row labels for all cuDF objects.
Parameters
----------
data : array-like (1-dimensional)/ DataFrame
If it is a DataFrame, it will return a MultiIndex
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
copy : bool
Make a copy of input data.
name : object
Name to be stored in the index.
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible.
tupleize_cols == False is not yet supported.
nan_as_null : bool, Default True
If ``None``/``True``, converts ``np.nan`` values to
``null`` values.
If ``False``, leaves ``np.nan`` values as is.
Returns
-------
Index
cudf Index
Warnings
--------
This class should not be subclassed. It is designed as a factory for
different subclasses of `BaseIndex` depending on the provided input.
If you absolutely must, and if you're intimately familiar with the
internals of cuDF, subclass `BaseIndex` instead.
Examples
--------
>>> import cudf
>>> cudf.Index([1, 2, 3], dtype="uint64", name="a")
UInt64Index([1, 2, 3], dtype='uint64', name='a')
"""
@_cudf_nvtx_annotate
def __new__(
cls,
data=None,
dtype=None,
copy=False,
name=no_default,
tupleize_cols=True,
nan_as_null=True,
**kwargs,
):
assert (
cls is Index
), "Index cannot be subclassed, extend BaseIndex instead."
if tupleize_cols is not True:
raise NotImplementedError(
"tupleize_cols != True is not yet supported"
)
res = as_index(
data,
copy=copy,
dtype=dtype,
name=name,
nan_as_null=nan_as_null,
**kwargs,
)
if (
isinstance(data, Sequence)
and not isinstance(data, range)
and len(data) == 0
and dtype is None
and getattr(data, "dtype", None) is None
):
return res.astype("str")
return res
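# Illustrative note (not part of the original source) on the special case
# above: an empty list-like with no dtype information is coerced to a
# string Index so that, e.g., the following matches pandas' object-dtype
# default for empty inputs.
# >>> cudf.Index([])   # becomes a string/object-dtype index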
@classmethod
@_cudf_nvtx_annotate
def from_arrow(cls, obj):
try:
return cls(ColumnBase.from_arrow(obj))
except TypeError:
# Try interpreting object as a MultiIndex before failing.
return cudf.MultiIndex.from_arrow(obj)
@cached_property
def is_monotonic_increasing(self):
return super().is_monotonic_increasing
@cached_property
def is_monotonic_decreasing(self):
return super().is_monotonic_decreasing
@_cudf_nvtx_annotate
def _concat_range_index(indexes: List[RangeIndex]) -> BaseIndex:
"""
An internal utility function to concatenate RangeIndex objects.
"""
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in indexes if len(obj)]
if not non_empty_indexes:
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0)
for obj in non_empty_indexes:
if start is None:
# This is set by the first non-empty index
start = obj.start
if step is None and len(obj) > 1:
step = obj.step
elif step is None:
# First non-empty index had only one element
if obj.start == start:
result = as_index(concat_columns([x._values for x in indexes]))
return result
step = obj.start - start
non_consecutive = (step != obj.step and len(obj) > 1) or (
next_ is not None and obj.start != next_
)
if non_consecutive:
result = as_index(concat_columns([x._values for x in indexes]))
return result
if step is not None:
next_ = obj[-1] + step
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step)
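# Illustrative worked example (not part of the original source) of the logic
# above: concatenating RangeIndex(0, 3) and RangeIndex(3, 6) is detected as
# consecutive (step 1, next expected start 3), so the result is
# RangeIndex(0, 6, 1); concatenating RangeIndex(0, 3) and RangeIndex(5, 8)
# is not consecutive and falls back to materializing the underlying columns.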
@_cudf_nvtx_annotate
def _extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
"""
Extended Euclidean algorithm to solve Bezout's identity:
a*x + b*y = gcd(a, b)
Finds one particular solution x = s, y = t.
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
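# Illustrative worked example (not part of the original source): for
# a=240, b=46 the loop terminates with gcd=2, s=-9, t=47, and indeed
# 240 * (-9) + 46 * 47 == 2, i.e. a*s + b*t == gcd(a, b).
# >>> _extended_gcd(240, 46)
# (2, -9, 47)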
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/single_column_frame.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
"""Base class for Frame types that only have a single column."""
from __future__ import annotations
import warnings
from typing import Any, Dict, Optional, Tuple, Union
import cupy
import numpy
import cudf
from cudf._typing import Dtype, NotImplementedType, ScalarLike
from cudf.api.extensions import no_default
from cudf.api.types import (
_is_scalar_or_zero_d_array,
is_bool_dtype,
is_integer,
is_integer_dtype,
)
from cudf.core.column import ColumnBase, as_column
from cudf.core.frame import Frame
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.utils import NotIterable
class SingleColumnFrame(Frame, NotIterable):
"""A one-dimensional frame.
Frames with only a single column share certain logic that is encoded in
this class.
"""
_SUPPORT_AXIS_LOOKUP = {
0: 0,
"index": 0,
}
@_cudf_nvtx_annotate
def _reduce(
self,
op,
axis=no_default,
level=None,
numeric_only=None,
**kwargs,
):
if axis not in (None, 0, no_default):
raise NotImplementedError("axis parameter is not implemented yet")
if level is not None:
raise NotImplementedError("level parameter is not implemented yet")
if numeric_only and not isinstance(
self._column, cudf.core.column.numerical_base.NumericalBaseColumn
):
raise NotImplementedError(
f"Series.{op} does not implement numeric_only."
)
try:
return getattr(self._column, op)(**kwargs)
except AttributeError:
raise TypeError(f"cannot perform {op} with type {self.dtype}")
@_cudf_nvtx_annotate
def _scan(self, op, axis=None, *args, **kwargs):
if axis not in (None, 0):
raise NotImplementedError("axis parameter is not implemented yet")
return super()._scan(op, axis=axis, *args, **kwargs)
@property # type: ignore
@_cudf_nvtx_annotate
def name(self):
"""Get the name of this object."""
return next(iter(self._data.names))
@name.setter # type: ignore
@_cudf_nvtx_annotate
def name(self, value):
self._data[value] = self._data.pop(self.name)
@property # type: ignore
@_cudf_nvtx_annotate
def ndim(self): # noqa: D401
"""Number of dimensions of the underlying data, by definition 1."""
return 1
@property # type: ignore
@_cudf_nvtx_annotate
def shape(self):
"""Get a tuple representing the dimensionality of the Index."""
return (len(self),)
def __bool__(self):
raise TypeError(
f"The truth value of a {type(self)} is ambiguous. Use "
"a.empty, a.bool(), a.item(), a.any() or a.all()."
)
@property # type: ignore
@_cudf_nvtx_annotate
def _num_columns(self):
return 1
@property # type: ignore
@_cudf_nvtx_annotate
def _column(self):
return self._data[self.name]
@_column.setter # type: ignore
@_cudf_nvtx_annotate
def _column(self, value):
self._data[self.name] = value
@property # type: ignore
@_cudf_nvtx_annotate
def values(self): # noqa: D102
return self._column.values
@property # type: ignore
@_cudf_nvtx_annotate
def values_host(self): # noqa: D102
return self._column.values_host
@_cudf_nvtx_annotate
def to_cupy(
self,
dtype: Union[Dtype, None] = None,
copy: bool = True,
na_value=None,
) -> cupy.ndarray: # noqa: D102
return super().to_cupy(dtype, copy, na_value).flatten()
@_cudf_nvtx_annotate
def to_numpy(
self,
dtype: Union[Dtype, None] = None,
copy: bool = True,
na_value=None,
) -> numpy.ndarray: # noqa: D102
return super().to_numpy(dtype, copy, na_value).flatten()
@classmethod
@_cudf_nvtx_annotate
def from_arrow(cls, array):
"""Create from PyArrow Array/ChunkedArray.
Parameters
----------
array : PyArrow Array/ChunkedArray
PyArrow Object which has to be converted.
Raises
------
TypeError for invalid input type.
Returns
-------
SingleColumnFrame
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> cudf.Index.from_arrow(pa.array(["a", "b", None]))
StringIndex(['a' 'b' None], dtype='object')
>>> cudf.Series.from_arrow(pa.array(["a", "b", None]))
0 a
1 b
2 <NA>
dtype: object
"""
return cls(ColumnBase.from_arrow(array))
@_cudf_nvtx_annotate
def to_arrow(self):
"""
Convert to a PyArrow Array.
Returns
-------
PyArrow Array
Examples
--------
>>> import cudf
>>> sr = cudf.Series(["a", "b", None])
>>> sr.to_arrow()
<pyarrow.lib.StringArray object at 0x7f796b0e7600>
[
"a",
"b",
null
]
>>> ind = cudf.Index(["a", "b", None])
>>> ind.to_arrow()
<pyarrow.lib.StringArray object at 0x7f796b0e7750>
[
"a",
"b",
null
]
"""
return self._column.to_arrow()
@property # type: ignore
@_cudf_nvtx_annotate
def is_monotonic(self):
"""Return boolean if values in the object are monotonically increasing.
This property is an alias for :attr:`is_monotonic_increasing`.
Returns
-------
bool
"""
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"is_monotonic is deprecated and will be removed in a future "
"version. Use is_monotonic_increasing instead.",
FutureWarning,
)
return self.is_monotonic_increasing
@property # type: ignore
@_cudf_nvtx_annotate
def is_monotonic_increasing(self):
"""Return boolean if values in the object are monotonically increasing.
Returns
-------
bool
"""
return self._column.is_monotonic_increasing
@property # type: ignore
@_cudf_nvtx_annotate
def is_monotonic_decreasing(self):
"""Return boolean if values in the object are monotonically decreasing.
Returns
-------
bool
"""
return self._column.is_monotonic_decreasing
@property # type: ignore
@_cudf_nvtx_annotate
def __cuda_array_interface__(self):
# While the parent column class has a `__cuda_array_interface__` method
# defined, it is not implemented for all column types. When it is not
# implemented, though, at the Frame level we really want to throw an
# AttributeError.
try:
return self._column.__cuda_array_interface__
except NotImplementedError:
raise AttributeError
@_cudf_nvtx_annotate
def factorize(self, sort=False, na_sentinel=None, use_na_sentinel=None):
"""Encode the input values as integer labels.
Parameters
----------
sort : bool, default False
Sort uniques and shuffle codes to maintain the relationship.
na_sentinel : number, default -1
Value to indicate missing category.
.. deprecated:: 23.04
The na_sentinel argument is deprecated and will be removed in
a future version of cudf. Specify use_na_sentinel as
either True or False.
use_na_sentinel : bool, default True
If True, the sentinel -1 will be used for NA values.
If False, NA values will be encoded as non-negative
integers and will not drop the NA from the uniques
of the values.
Returns
-------
(labels, cats) : (cupy.ndarray, cupy.ndarray or Index)
- *labels* contains the encoded values
- *cats* contains the categories in order, such that the N-th
item corresponds to code N-1.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'a', 'c'])
>>> codes, uniques = s.factorize()
>>> codes
array([0, 0, 1], dtype=int8)
>>> uniques
StringIndex(['a' 'c'], dtype='object')
"""
return cudf.core.algorithms.factorize(
self,
sort=sort,
na_sentinel=na_sentinel,
use_na_sentinel=use_na_sentinel,
)
@_cudf_nvtx_annotate
def _make_operands_for_binop(
self,
other: Any,
fill_value: Any = None,
reflect: bool = False,
*args,
**kwargs,
) -> Union[
Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]],
NotImplementedType,
]:
"""Generate the dictionary of operands used for a binary operation.
Parameters
----------
other : SingleColumnFrame
The second operand.
fill_value : Any, default None
The value to replace null values with. If ``None``, nulls are not
filled before the operation.
reflect : bool, default False
If ``True``, swap the order of the operands. See
https://docs.python.org/3/reference/datamodel.html#object.__ror__
for more information on when this is necessary.
Returns
-------
Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]]
The operands to be passed to _colwise_binop.
"""
# Get the appropriate name for output operations involving two objects
# that are Series-like objects. The output shares the lhs's name unless
# the rhs is a _differently_ named Series-like object.
if isinstance(
other, SingleColumnFrame
) and not cudf.utils.utils._is_same_name(self.name, other.name):
result_name = None
else:
result_name = self.name
if isinstance(other, SingleColumnFrame):
other = other._column
elif not _is_scalar_or_zero_d_array(other):
if not hasattr(
other, "__cuda_array_interface__"
) and not isinstance(other, cudf.RangeIndex):
return NotImplemented
# Non-scalar right operands are valid iff they convert to columns.
try:
other = as_column(other)
except Exception:
return NotImplemented
return {result_name: (self._column, other, reflect, fill_value)}
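# Illustrative sketch (not part of the original source) of the
# name-resolution rule implemented above: a binary operation between two
# Series-like objects keeps the left-hand name only when the names match,
# otherwise the result is unnamed.
# >>> s = cudf.Series([1, 2], name="a")
# >>> (s + cudf.Series([3, 4], name="a")).name   # "a"
# >>> (s + cudf.Series([3, 4], name="b")).name   # None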
@_cudf_nvtx_annotate
def nunique(self, dropna: bool = True):
"""
Return count of unique values for the column.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
int
Number of unique values in the column.
"""
if self._column.null_count == len(self):
return 0
return self._column.distinct_count(dropna=dropna)
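# Illustrative sketch (not part of the original source):
# >>> cudf.Series([1, 1, 2, None]).nunique()               # 2
# >>> cudf.Series([1, 1, 2, None]).nunique(dropna=False)   # 3, null counted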
def _get_elements_from_column(self, arg) -> Union[ScalarLike, ColumnBase]:
# A generic method for getting elements from a column that supports a
# wide range of different inputs. This method should only be used where
# _absolutely_ necessary, since in almost all cases a more specific
# method can be used e.g. element_indexing or slice.
if _is_scalar_or_zero_d_array(arg):
if not is_integer(arg):
raise ValueError(
"Can only select elements with an integer, "
f"not a {type(arg).__name__}"
)
return self._column.element_indexing(int(arg))
elif isinstance(arg, slice):
start, stop, stride = arg.indices(len(self))
return self._column.slice(start, stop, stride)
else:
arg = as_column(arg)
if len(arg) == 0:
arg = as_column([], dtype="int32")
if is_integer_dtype(arg.dtype):
return self._column.take(arg)
if is_bool_dtype(arg.dtype):
if (bn := len(arg)) != (n := len(self)):
raise IndexError(
f"Boolean mask has wrong length: {bn} not {n}"
)
return self._column.apply_boolean_mask(arg)
raise NotImplementedError(f"Unknown indexer {type(arg)}")
@_cudf_nvtx_annotate
def where(self, cond, other=None, inplace=False):
from cudf.core._internals.where import (
_check_and_cast_columns_with_other,
_make_categorical_like,
)
if isinstance(other, cudf.DataFrame):
raise NotImplementedError(
"cannot align with a higher dimensional Frame"
)
cond = as_column(cond)
if len(cond) != len(self):
raise ValueError(
"""Array conditional must be same shape as self"""
)
if not cudf.api.types.is_scalar(other):
other = cudf.core.column.as_column(other)
self_column = self._column
input_col, other = _check_and_cast_columns_with_other(
source_col=self_column, other=other, inplace=inplace
)
result = cudf._lib.copying.copy_if_else(input_col, other, cond)
return _make_categorical_like(result, self_column)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/copy_types.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, cast
from typing_extensions import Self
import cudf
import cudf._lib as libcudf
from cudf._lib.types import size_type_dtype
if TYPE_CHECKING:
from cudf.core.column import NumericalColumn
@dataclass
class GatherMap:
"""A representation of a column as a gather map.
This object augments the column with the information that it
is valid as a gather map for the specified number of rows with
the given nullification flag.
Parameters
----------
column
The data to turn into a column and then verify
nrows
The number of rows to verify against
nullify
Will the gather map be used nullifying out of bounds
accesses?
Returns
-------
GatherMap
New object wrapping the column bearing witness to its
suitability as a gather map for columns with nrows rows.
Raises
------
TypeError
If the column is of unsuitable dtype
IndexError
If the map is not in bounds.
"""
#: The gather map
column: "NumericalColumn"
#: The number of rows the gather map has been validated for
nrows: int
#: Was the validation for nullify=True?
nullify: bool
def __init__(self, column: Any, nrows: int, *, nullify: bool):
self.column = cudf.core.column.as_column(column)
self.nrows = nrows
self.nullify = nullify
if len(self.column) == 0:
# Any empty column is valid as a gather map
# This is necessary because as_column([]) defaults to float64
# TODO: we should fix this further up.
# Alternately we can have an Optional[Column] and handle None
# specially in _gather.
self.column = cast(
"NumericalColumn", self.column.astype(size_type_dtype)
)
else:
if self.column.dtype.kind not in {"i", "u"}:
raise TypeError("Gather map must have integer dtype")
if not nullify:
lo, hi = libcudf.reduce.minmax(self.column)
if lo.value < -nrows or hi.value >= nrows:
raise IndexError(
f"Gather map is out of bounds for [0, {nrows})"
)
@classmethod
def from_column_unchecked(
cls, column: "NumericalColumn", nrows: int, *, nullify: bool
) -> Self:
"""Construct a new GatherMap from a column without checks.
Parameters
----------
column
The column that will be used as a gather map
nrows
The number of rows the gather map will be used for
nullify
Will the gather map be used nullifying out of bounds
accesses?
Returns
-------
GatherMap
Notes
-----
This method asserts, by fiat, that the column is valid.
Behaviour is undefined if it is not.
"""
self = cls.__new__(cls)
self.column = column
self.nrows = nrows
self.nullify = nullify
return self
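# Illustrative sketch (not part of the original source): the bounds check in
# __init__ rejects an index equal to nrows unless nullify=True.
# >>> GatherMap([0, 2], nrows=2, nullify=False)   # raises IndexError (2 >= 2)
# >>> GatherMap([0, 2], nrows=2, nullify=True)    # accepted; 2 nullifies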
@dataclass
class BooleanMask:
"""A representation of a column as a boolean mask.
This augments the column with information that it is valid as a
boolean mask for columns with a given number of rows
Parameters
----------
column
The data to turn into a column and then verify
nrows
The number of rows to verify against
Returns
-------
BooleanMask
New object wrapping the column bearing witness to its
suitability as a boolean mask for columns with matching
row count.
Raises
------
TypeError
If the column is of unsuitable dtype
IndexError
If the mask has the wrong number of rows
"""
#: The boolean mask
column: "NumericalColumn"
def __init__(self, column: Any, nrows: int):
self.column = cudf.core.column.as_column(column)
if self.column.dtype.kind != "b":
raise TypeError("Boolean mask must have bool dtype")
if len(column) != nrows:
raise IndexError(
f"Column with {len(column)} rows not suitable "
f"as a boolean mask for {nrows} rows"
)
@classmethod
def from_column_unchecked(cls, column: "NumericalColumn") -> Self:
"""Construct a new BooleanMask from a column without checks.
Parameters
----------
column
The column that will be used as a boolean mask
Returns
-------
BooleanMask
Notes
-----
This method asserts, by fiat, that the column is valid.
Behaviour is undefined if it is not.
"""
self = cls.__new__(cls)
self.column = column
return self
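# Illustrative sketch (not part of the original source): the constructor
# validates both dtype and length.
# >>> BooleanMask([True, False, True], nrows=3)   # accepted
# >>> BooleanMask([True, False], nrows=3)         # raises IndexError
# >>> BooleanMask([1, 0, 1], nrows=3)             # raises TypeError (not bool)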
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/__init__.py
|
# Copyright (c) 2018-2021, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/tokenize_vocabulary.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
from __future__ import annotations
import cudf
from cudf._lib.nvtext.tokenize import (
TokenizeVocabulary as cpp_tokenize_vocabulary,
tokenize_with_vocabulary as cpp_tokenize_with_vocabulary,
)
class TokenizeVocabulary:
"""
A vocabulary object used to tokenize input text.
Parameters
----------
vocabulary : cudf.Series
Strings column of vocabulary terms
"""
def __init__(self, vocabulary: "cudf.Series"):
self.vocabulary = cpp_tokenize_vocabulary(vocabulary._column)
def tokenize(self, text, delimiter: str = "", default_id: int = -1):
"""
Parameters
----------
text : cudf string series
The strings to be tokenized.
delimiter : str
Delimiter to identify tokens. Default is whitespace.
default_id : int
Value to use for tokens not found in the vocabulary.
Default is -1.
Returns
-------
Tokenized strings
"""
if delimiter is None:
delimiter = ""
delim = cudf.Scalar(delimiter, dtype="str")
result = cpp_tokenize_with_vocabulary(
text._column, self.vocabulary, delim, default_id
)
return cudf.Series(result)
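# Illustrative sketch (not part of the original source): tokens found in the
# vocabulary map to their ids, anything else maps to ``default_id``.
# >>> vocab = TokenizeVocabulary(cudf.Series(["the", "cat"]))
# >>> vocab.tokenize(cudf.Series(["the big cat"]))
# Each row becomes a list of ids; "big" is not in the vocabulary, so it
# maps to -1 here.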
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/cut.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
from collections import abc
import cupy
import numpy as np
import pandas as pd
import cudf
from cudf.api.types import is_list_like
from cudf.core.column import as_column, build_categorical_column
from cudf.core.index import IntervalIndex, interval_range
def cut(
x,
bins,
right: bool = True,
labels=None,
retbins: bool = False,
precision: int = 3,
include_lowest: bool = False,
duplicates: str = "raise",
ordered: bool = True,
):
"""Bin values into discrete intervals.
Use cut when you need to segment and sort data values into bins. This
function is also useful for going from a continuous variable to a
categorical variable.
Parameters
----------
x : array-like
The input array to be binned. Must be 1-dimensional.
bins : int, sequence of scalars, or IntervalIndex
The criteria to bin by.
* int : Defines the number of equal-width bins in the range of `x`. The
range of `x` is extended by .1% on each side to include the minimum
and maximum values of `x`.
* sequence of scalars : Defines the bin edges allowing for non-uniform
width. No extension of the range of `x` is done.
* IntervalIndex : Defines the exact bins to be used. Note that
IntervalIndex for `bins` must be non-overlapping.
right : bool, default True
Indicates whether bins includes the rightmost edge or not.
labels : array or False, default None
Specifies the labels for the returned bins. Must be the same
length as the resulting bins. If False, returns only integer
indicators of the bins. If True, raises an error. When ordered=False,
labels must be provided.
retbins : bool, default False
Whether to return the bins or not.
precision : int, default 3
The precision at which to store and display the bins labels.
include_lowest : bool, default False
Whether the first interval should be left-inclusive or not.
duplicates : {default 'raise', 'drop'}, optional
If bin edges are not unique, raise ValueError or drop non-uniques.
ordered : bool, default True
Whether the labels are ordered or not. Applies to returned types
Categorical and Series (with Categorical dtype). If True,
the resulting categorical will be ordered. If False, the resulting
categorical will be unordered (labels must be provided).
Returns
-------
out : CategoricalIndex
An array-like object representing the respective bin for each value
of x. The type depends on the value of labels.
bins : numpy.ndarray or IntervalIndex.
The computed or specified bins. Only returned when retbins=True.
For scalar or sequence bins, this is an ndarray with the computed
bins. If duplicates='drop' is set, the non-unique bins will be dropped. For
an IntervalIndex bins, this is equal to bins.
Examples
--------
Discretize into three equal-sized bins.
>>> cudf.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
CategoricalIndex([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0],
(5.0, 7.0], (0.994, 3.0]], categories=[(0.994, 3.0],
(3.0, 5.0], (5.0, 7.0]], ordered=True, dtype='category')
>>> cudf.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
(CategoricalIndex([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0],
(5.0, 7.0], (0.994, 3.0]], categories=[(0.994, 3.0],
(3.0, 5.0], (5.0, 7.0]], ordered=True, dtype='category'),
array([0.994, 3. , 5. , 7. ]))
>>> cudf.cut(np.array([1, 7, 5, 4, 6, 3]),
... 3, labels=["bad", "medium", "good"])
CategoricalIndex(['bad', 'good', 'medium', 'medium', 'good', 'bad'],
categories=['bad', 'medium', 'good'],ordered=True,
dtype='category')
>>> cudf.cut(np.array([1, 7, 5, 4, 6, 3]), 3,
... labels=["B", "A", "B"], ordered=False)
CategoricalIndex(['B', 'B', 'A', 'A', 'B', 'B'], categories=['A', 'B'],
ordered=False, dtype='category')
>>> cudf.cut([0, 1, 1, 2], bins=4, labels=False)
array([0, 1, 1, 3], dtype=int32)
Passing a Series as an input returns a Series with categorical dtype:
>>> s = cudf.Series(np.array([2, 4, 6, 8, 10]),
... index=['a', 'b', 'c', 'd', 'e'])
>>> cudf.cut(s, 3)
"""
left_inclusive = False
right_inclusive = True
# saving the original input x for use in case its a series
orig_x = x
old_bins = bins
if not ordered and labels is None:
raise ValueError("'labels' must be provided if 'ordered = False'")
if duplicates not in ["raise", "drop"]:
raise ValueError(
"invalid value for 'duplicates' parameter, valid options are: "
"raise, drop"
)
if labels is not False:
if not (labels is None or is_list_like(labels)):
raise ValueError(
"Bin labels must either be False, None or passed in as a "
"list-like argument"
)
if ordered and labels is not None:
if len(set(labels)) != len(labels):
raise ValueError(
"labels must be unique if ordered=True;"
"pass ordered=False for duplicate labels"
)
# bins can either be an int, sequence of scalars or an intervalIndex
if isinstance(bins, abc.Sequence):
if len(set(bins)) != len(bins):
if duplicates == "raise":
raise ValueError(
f"Bin edges must be unique: {repr(bins)}.\n"
f"You can drop duplicate edges by setting the 'duplicates'"
"kwarg"
)
elif duplicates == "drop":
# get unique values but maintain list dtype
bins = list(dict.fromkeys(bins))
# if bins is an intervalIndex we ignore the value of right
elif isinstance(bins, (pd.IntervalIndex, cudf.IntervalIndex)):
right = bins.closed == "right"
# create bins if given an int or single scalar
if not isinstance(bins, pd.IntervalIndex):
if not isinstance(bins, abc.Sequence):
if isinstance(
x, (pd.Series, cudf.Series, np.ndarray, cupy.ndarray)
):
mn = x.min()
mx = x.max()
else:
mn = min(x)
mx = max(x)
bins = np.linspace(mn, mx, bins + 1, endpoint=True)
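# pad the outermost edge by 0.1% of the data range so the minimum (or
# maximum) value still falls inside a bin, mirroring pandas' behavior
# for integer ``bins``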
adj = (mx - mn) * 0.001
if right:
bins[0] -= adj
else:
bins[-1] += adj
# if right and include lowest we adjust the first
# bin edge to make sure it is included
if right and include_lowest:
bins[0] = bins[0] - 10 ** (-precision)
# if right is false the last bin edge is not included
if not right:
right_edge = bins[-1]
x = cupy.asarray(x)
x[x == right_edge] = right_edge + 1
# adjust bin edges decimal precision
int_label_bins = np.around(bins, precision)
# the input is a column of the values in the array x
input_arr = as_column(x)
# checking for the correct inclusivity values
if right:
closed = "right"
else:
closed = "left"
left_inclusive = True
if isinstance(bins, pd.IntervalIndex):
interval_labels = bins
elif labels is None:
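# if dropping duplicates collapsed every edge into a single value, rebuild
# a single interval from the first and last of the originally supplied edges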
if duplicates == "drop" and len(bins) == 1 and len(old_bins) != 1:
if right and include_lowest:
old_bins[0] = old_bins[0] - 10 ** (-precision)
interval_labels = interval_range(
old_bins[0], old_bins[1], periods=1, closed=closed
)
else:
interval_labels = IntervalIndex.from_breaks(
old_bins, closed=closed
)
else:
# get labels for categories
interval_labels = IntervalIndex.from_breaks(
int_label_bins, closed=closed
)
elif labels is not False:
if not (is_list_like(labels)):
raise ValueError(
"Bin labels must either be False, None or passed in as a "
"list-like argument"
)
if ordered and len(set(labels)) != len(labels):
raise ValueError(
"labels must be unique if ordered=True; "
"pass ordered=False for"
"duplicate labels"
)
if len(labels) != len(bins) - 1:
raise ValueError(
"Bin labels must be one fewer than the number of bin edges"
)
if not ordered and len(set(labels)) != len(labels):
interval_labels = cudf.CategoricalIndex(
labels, categories=None, ordered=False
)
else:
interval_labels = (
labels if len(set(labels)) == len(labels) else None
)
if isinstance(bins, pd.IntervalIndex):
# get the left and right edges of the bins as columns
# we cannot typecast an IntervalIndex, so we need to
# make the edges the same type as the input array
left_edges = as_column(bins.left).astype(input_arr.dtype)
right_edges = as_column(bins.right).astype(input_arr.dtype)
else:
# get the left and right edges of the bins as columns
left_edges = as_column(bins[:-1], dtype="float64")
right_edges = as_column(bins[1:], dtype="float64")
# the input arr must be changed to the same type as the edges
input_arr = input_arr.astype(left_edges.dtype)
# label each value with the index of the bin it falls into
index_labels = cudf._lib.labeling.label_bins(
input_arr, left_edges, left_inclusive, right_edges, right_inclusive
)
if labels is False:
# if labels is false we return the index labels, we return them
# as a series if we have a series input
if isinstance(orig_x, (pd.Series, cudf.Series)):
# need to run more tests but looks like in this case pandas
# always returns a float64 dtype
indx_arr_series = cudf.Series(index_labels, dtype="float64")
# if retbins we return the bins as well
if retbins:
return indx_arr_series, bins
else:
return indx_arr_series
elif retbins:
return index_labels.values, bins
else:
return index_labels.values
if labels is not None:
if not ordered and len(set(labels)) != len(labels):
# when we have duplicate labels and ordered is False, we
# should allow duplicate categories.
return interval_labels[index_labels]
col = build_categorical_column(
categories=interval_labels,
codes=index_labels,
mask=index_labels.base_mask,
offset=index_labels.offset,
size=index_labels.size,
ordered=ordered,
)
# we return a categorical index, as we don't have a Categorical method
categorical_index = cudf.core.index.as_index(col)
if isinstance(orig_x, (pd.Series, cudf.Series)):
# if we have a series input we return a series output
res_series = cudf.Series(categorical_index, index=orig_x.index)
if retbins:
return res_series, bins
else:
return res_series
elif retbins:
# if retbins is true we return the bins as well
return categorical_index, bins
else:
return categorical_index
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/indexed_frame.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
"""Base class for Frame types that have an index."""
from __future__ import annotations
import numbers
import operator
import textwrap
import warnings
from collections import Counter, abc
from functools import cached_property
from typing import (
Any,
Callable,
Dict,
List,
MutableMapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from uuid import uuid4
import cupy as cp
import numpy as np
import pandas as pd
from typing_extensions import Self
import cudf
import cudf._lib as libcudf
from cudf._lib.types import size_type_dtype
from cudf._typing import (
ColumnLike,
DataFrameOrSeries,
Dtype,
NotImplementedType,
)
from cudf.api.extensions import no_default
from cudf.api.types import (
_is_non_decimal_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_decimal_dtype,
is_dict_like,
is_list_dtype,
is_list_like,
is_scalar,
)
from cudf.core._base_index import BaseIndex
from cudf.core.buffer import acquire_spill_lock
from cudf.core.column import ColumnBase, as_column, full
from cudf.core.column_accessor import ColumnAccessor
from cudf.core.copy_types import BooleanMask, GatherMap
from cudf.core.dtypes import ListDtype
from cudf.core.frame import Frame
from cudf.core.groupby.groupby import GroupBy
from cudf.core.index import Index, RangeIndex, _index_from_columns
from cudf.core.missing import NA
from cudf.core.multiindex import MultiIndex
from cudf.core.resample import _Resampler
from cudf.core.udf.utils import (
_compile_or_get,
_get_input_args_from_frame,
_post_process_output_col,
_return_arr_from_dtype,
)
from cudf.utils import docutils
from cudf.utils._numba import _CUDFNumbaConfig
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.utils import _warn_no_dask_cudf
doc_reset_index_template = """
Reset the index of the {klass}, or a level of it.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
{argument}
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
Returns
-------
{return_type}
{klass} with the new index or None if ``inplace=True``.{return_doc}
Examples
--------
{example}
"""
doc_binop_template = textwrap.dedent(
"""
Get {operation} of DataFrame or Series and other, element-wise (binary
operator `{op_name}`).
Equivalent to ``frame + other``, but with support to substitute a
``fill_value`` for missing data in one of the inputs.
Parameters
----------
other : scalar, sequence, Series, or DataFrame
Any single or multiple element data structure, or list-like object.
axis : int or string
Only ``0`` is supported for series, ``1`` or ``columns`` supported
for dataframe
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level. Not yet supported.
fill_value : float or None, default None
Fill existing missing (NaN) values, and any new element needed
for successful DataFrame alignment, with this value before
computation. If data in both corresponding DataFrame locations
is missing the result will be missing.
Returns
-------
DataFrame or Series
Result of the arithmetic operation.
Examples
--------
**DataFrame**
>>> df = cudf.DataFrame(
... {{'angles': [0, 3, 4], 'degrees': [360, 180, 360]}},
... index=['circle', 'triangle', 'rectangle']
... )
{df_op_example}
**Series**
>>> a = cudf.Series([1, 1, 1, None], index=['a', 'b', 'c', 'd'])
>>> b = cudf.Series([1, None, 1, None], index=['a', 'b', 'd', 'e'])
{ser_op_example}
"""
)
def _get_host_unique(array):
if isinstance(array, (cudf.Series, cudf.Index, ColumnBase)):
return array.unique.to_pandas()
elif isinstance(array, (str, numbers.Number)):
return [array]
else:
return set(array)
def _drop_columns(f: Frame, columns: abc.Iterable, errors: str):
for c in columns:
try:
f._drop_column(c)
except KeyError as e:
if errors == "ignore":
pass
else:
raise e
def _indices_from_labels(obj, labels):
if not isinstance(labels, cudf.MultiIndex):
labels = cudf.core.column.as_column(labels)
if is_categorical_dtype(obj.index):
labels = labels.astype("category")
codes = labels.codes.astype(obj.index._values.codes.dtype)
labels = cudf.core.column.build_categorical_column(
categories=labels.dtype.categories,
codes=codes,
ordered=labels.dtype.ordered,
)
else:
labels = labels.astype(obj.index.dtype)
# join is not guaranteed to maintain the index ordering
# so we will sort it with its initial ordering which is stored
# in column "__"
lhs = cudf.DataFrame(
{"__": cudf.core.column.arange(len(labels))}, index=labels
)
rhs = cudf.DataFrame(
{"_": cudf.core.column.arange(len(obj))}, index=obj.index
)
return lhs.join(rhs).sort_values(by=["__", "_"])["_"]
def _get_label_range_or_mask(index, start, stop, step):
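# For a non-monotonic DatetimeIndex, return a boolean mask selecting rows
# between start and stop; otherwise resolve the labels to a positional slice
# via the index.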
if (
not (start is None and stop is None)
and type(index) is cudf.core.index.DatetimeIndex
and index.is_monotonic_increasing is False
):
start = pd.to_datetime(start)
stop = pd.to_datetime(stop)
if start is not None and stop is not None:
if start > stop:
return slice(0, 0, None)
# TODO: Once Index binary ops are updated to support logical_and,
# can use that instead of using cupy.
boolean_mask = cp.logical_and((index >= start), (index <= stop))
elif start is not None:
boolean_mask = index >= start
else:
boolean_mask = index <= stop
return boolean_mask
else:
return index.find_label_range(slice(start, stop, step))
class _FrameIndexer:
"""Parent class for indexers."""
def __init__(self, frame):
self._frame = frame
_LocIndexerClass = TypeVar("_LocIndexerClass", bound="_FrameIndexer")
_IlocIndexerClass = TypeVar("_IlocIndexerClass", bound="_FrameIndexer")
class IndexedFrame(Frame):
"""A frame containing an index.
This class encodes the common behaviors for core user-facing classes like
DataFrame and Series that consist of a sequence of columns along with a
special set of index columns.
Parameters
----------
data : dict
A dict mapping column names to Columns
index : Table
A Frame representing the (optional) index columns.
"""
# mypy can't handle bound type variables as class members
_loc_indexer_type: Type[_LocIndexerClass] # type: ignore
_iloc_indexer_type: Type[_IlocIndexerClass] # type: ignore
_index: cudf.core.index.BaseIndex
_groupby = GroupBy
_resampler = _Resampler
_VALID_SCANS = {
"cumsum",
"cumprod",
"cummin",
"cummax",
}
# Necessary because the function names don't directly map to the docs.
_SCAN_DOCSTRINGS = {
"cumsum": {"op_name": "cumulative sum"},
"cumprod": {"op_name": "cumulative product"},
"cummin": {"op_name": "cumulative min"},
"cummax": {"op_name": "cumulative max"},
}
def __init__(self, data=None, index=None):
super().__init__(data=data)
# TODO: Right now it is possible to initialize an IndexedFrame without
# an index. The code's correctness relies on the subclass constructors
# assigning the attribute after the fact. We should restructure those
# to ensure that this constructor is always invoked with an index.
self._index = index
@property
def _num_rows(self) -> int:
# Important to use the index because the data may be empty.
return len(self._index)
@property
def _index_names(self) -> Tuple[Any, ...]: # TODO: Tuple[str]?
return self._index._data.names
@classmethod
def _from_data(
cls,
data: MutableMapping,
index: Optional[BaseIndex] = None,
):
out = super()._from_data(data)
out._index = RangeIndex(out._data.nrows) if index is None else index
return out
@_cudf_nvtx_annotate
def _from_data_like_self(self, data: MutableMapping):
out = self._from_data(data, self._index)
out._data._level_names = self._data._level_names
return out
@classmethod
@_cudf_nvtx_annotate
def _from_columns(
cls,
columns: List[ColumnBase],
column_names: List[str],
index_names: Optional[List[str]] = None,
):
"""Construct a `Frame` object from a list of columns.
If `index_names` is set, the first `len(index_names)` columns are
used to construct the index of the frame.
"""
data_columns = columns
index = None
if index_names is not None:
n_index_columns = len(index_names)
data_columns = columns[n_index_columns:]
index = _index_from_columns(columns[:n_index_columns])
if isinstance(index, cudf.MultiIndex):
index.names = index_names
else:
index.name = index_names[0]
out = super()._from_columns(data_columns, column_names)
if index is not None:
out._index = index
return out
@_cudf_nvtx_annotate
def _from_columns_like_self(
self,
columns: List[ColumnBase],
column_names: Optional[abc.Iterable[str]] = None,
index_names: Optional[List[str]] = None,
*,
override_dtypes: Optional[abc.Iterable[Optional[Dtype]]] = None,
) -> Self:
"""Construct a `Frame` from a list of columns with metadata from self.
If `index_names` is set, the first `len(index_names)` columns are
used to construct the index of the frame.
If override_dtypes is provided then any non-None entry will be
used for the dtype of the matching column in preference to the
dtype of the column in self.
"""
if column_names is None:
column_names = self._column_names
frame = self.__class__._from_columns(
columns, column_names, index_names
)
return frame._copy_type_metadata(
self,
include_index=bool(index_names),
override_dtypes=override_dtypes,
)
def __round__(self, digits=0):
# Shouldn't be added to BinaryOperand
# because pandas Index doesn't implement
# this method.
return self.round(decimals=digits)
def _mimic_inplace(
self, result: Self, inplace: bool = False
) -> Optional[Self]:
if inplace:
self._index = result._index
return super()._mimic_inplace(result, inplace)
# Scans
@_cudf_nvtx_annotate
def _scan(self, op, axis=None, skipna=True):
"""
Return {op_name} of the {cls}.
Parameters
----------
axis: {{index (0), columns(1)}}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA,
the result will be NA.
Returns
-------
{cls}
Examples
--------
**Series**
>>> import cudf
>>> ser = cudf.Series([1, 5, 2, 4, 3])
>>> ser.cumsum()
0 1
1 6
2 8
3 12
4 15
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame({{'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]}})
>>> df.cumsum()
a b
0 1 7
1 3 15
2 6 24
3 10 34
"""
cast_to_int = op in ("cumsum", "cumprod")
skipna = True if skipna is None else skipna
results = {}
for name, col in self._data.items():
if skipna:
try:
result_col = col.nans_to_nulls()
except AttributeError:
result_col = col
else:
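# with skipna=False the scan must propagate nulls: every position from the
# first null onwards becomes null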
if col.has_nulls(include_nan=True):
first_index = col.isnull().find_first_value(True)
result_col = col.copy()
result_col[first_index:] = None
else:
result_col = col
if (
cast_to_int
and not is_decimal_dtype(result_col.dtype)
and (
np.issubdtype(result_col.dtype, np.integer)
or np.issubdtype(result_col.dtype, np.bool_)
)
):
# For reductions that accumulate a value (e.g. sum, not max)
# pandas returns an int64 dtype for all int or bool dtypes.
result_col = result_col.astype(np.int64)
results[name] = getattr(result_col, op)()
return self._from_data(results, self._index)
def _check_data_index_length_match(self) -> None:
# Validate that the number of rows in the data matches the index if the
# data is not empty. This is a helper for the constructor.
if self._data.nrows > 0 and self._data.nrows != len(self._index):
raise ValueError(
f"Length of values ({self._data.nrows}) does not "
f"match length of index ({len(self._index)})"
)
@property
@_cudf_nvtx_annotate
def empty(self):
"""
Indicator whether DataFrame or Series is empty.
True if DataFrame/Series is entirely empty (no items),
meaning any of the axes are of length 0.
Returns
-------
out : bool
If DataFrame/Series is empty, return True, if not return False.
Notes
-----
If DataFrame/Series contains only `null` values, it is still not
considered empty. See the example below.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'A' : []})
>>> df
Empty DataFrame
Columns: [A]
Index: []
>>> df.empty
True
If we only have `null` values in our DataFrame, it is
not considered empty! We will need to drop
the `null`'s to make the DataFrame empty:
>>> df = cudf.DataFrame({'A' : [None, None]})
>>> df
A
0 <NA>
1 <NA>
>>> df.empty
False
>>> df.dropna().empty
True
Non-empty and empty Series example:
>>> s = cudf.Series([1, 2, None])
>>> s
0 1
1 2
2 <NA>
dtype: int64
>>> s.empty
False
>>> s = cudf.Series([])
>>> s
Series([], dtype: float64)
>>> s.empty
True
"""
return self.size == 0
def copy(self, deep: bool = True) -> Self:
"""Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Examples
--------
>>> s = cudf.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = cudf.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s['a'] = 3
>>> shallow['b'] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
"""
return self._from_data(
self._data.copy(deep=deep),
# Indexes are immutable so copies can always be shallow.
self._index.copy(deep=False),
)
@_cudf_nvtx_annotate
def equals(self, other): # noqa: D102
if not super().equals(other):
return False
return self._index.equals(other._index)
@property
def index(self):
"""Get the labels for the rows."""
return self._index
@index.setter
def index(self, value):
old_length = len(self)
new_length = len(value)
# A DataFrame with 0 columns can have an index of arbitrary length.
if len(self._data) > 0 and new_length != old_length:
raise ValueError(
f"Length mismatch: Expected axis has {old_length} elements, "
f"new values have {len(value)} elements"
)
self._index = Index(value)
@_cudf_nvtx_annotate
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method=None,
):
"""Replace values given in ``to_replace`` with ``value``.
Parameters
----------
to_replace : numeric, str or list-like
Value(s) to replace.
* numeric or str:
- values equal to ``to_replace`` will be replaced
with ``value``
* list of numeric or str:
- If ``value`` is also list-like, ``to_replace`` and
``value`` must be of same length.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example, {'a': 'b',
'y': 'z'} replaces the value 'a' with 'b' and
'y' with 'z'.
To use a dict in this way the ``value`` parameter should
be ``None``.
value : scalar, dict, list-like, str, default None
Value to replace any values matching ``to_replace`` with.
inplace : bool, default False
If True, in place.
See Also
--------
Series.fillna
Raises
------
TypeError
- If ``to_replace`` is not a scalar, array-like, dict, or None
- If ``to_replace`` is a dict and value is not a list, dict,
or Series
ValueError
- If a list is passed to ``to_replace`` and ``value`` but they
are not the same length.
Returns
-------
result : Series
Series after replacement. The mask and index are preserved.
Notes
-----
Parameters that are currently not supported are: `limit`, `regex`,
`method`
Examples
--------
**Series**
Scalar ``to_replace`` and ``value``
>>> import cudf
>>> s = cudf.Series([0, 1, 2, 3, 4])
>>> s
0 0
1 1
2 2
3 3
4 4
dtype: int64
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
List-like ``to_replace``
>>> s.replace([1, 2], 10)
0 0
1 10
2 10
3 3
4 4
dtype: int64
dict-like ``to_replace``
>>> s.replace({1:5, 3:50})
0 0
1 5
2 2
3 50
4 4
dtype: int64
>>> s = cudf.Series(['b', 'a', 'a', 'b', 'a'])
>>> s
0 b
1 a
2 a
3 b
4 a
dtype: object
>>> s.replace({'a': None})
0 b
1 <NA>
2 <NA>
3 b
4 <NA>
dtype: object
If there is a mismatch in types of the values in
``to_replace`` & ``value`` with the actual series, then
cudf exhibits different behavior with respect to pandas
and the pairs are ignored silently:
>>> s = cudf.Series(['b', 'a', 'a', 'b', 'a'])
>>> s
0 b
1 a
2 a
3 b
4 a
dtype: object
>>> s.replace('a', 1)
0 b
1 a
2 a
3 b
4 a
dtype: object
>>> s.replace(['a', 'c'], [1, 2])
0 b
1 a
2 a
3 b
4 a
dtype: object
**DataFrame**
Scalar ``to_replace`` and ``value``
>>> import cudf
>>> df = cudf.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df
A B C
0 0 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
List-like ``to_replace``
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
dict-like ``to_replace``
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
"""
if limit is not None:
raise NotImplementedError("limit parameter is not implemented yet")
if regex:
raise NotImplementedError("regex parameter is not implemented yet")
if method not in ("pad", None):
raise NotImplementedError(
"method parameter is not implemented yet"
)
if not (to_replace is None and value is None):
copy_data = {}
(
all_na_per_column,
to_replace_per_column,
replacements_per_column,
) = _get_replacement_values_for_columns(
to_replace=to_replace,
value=value,
columns_dtype_map=self._dtypes,
)
for name, col in self._data.items():
try:
copy_data[name] = col.find_and_replace(
to_replace_per_column[name],
replacements_per_column[name],
all_na_per_column[name],
)
except (KeyError, OverflowError):
# We need to create a deep copy if:
# i. `find_and_replace` was not successful or any of
# `to_replace_per_column`, `replacements_per_column`,
# `all_na_per_column` don't contain the `name`
# that exists in `copy_data`.
# ii. There is an OverflowError while trying to cast
# `to_replace_per_column` to `replacements_per_column`.
copy_data[name] = col.copy(deep=True)
else:
copy_data = self._data.copy(deep=True)
result = self._from_data(copy_data, self._index)
return self._mimic_inplace(result, inplace=inplace)
@_cudf_nvtx_annotate
def clip(self, lower=None, upper=None, inplace=False, axis=1):
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values.
Thresholds can be singular values or array like,
and in the latter case the clipping is performed
element-wise in the specified axis. Currently only
`axis=1` is supported.
Parameters
----------
lower : scalar or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it. If it is None,
there will be no clipping based on lower.
In case of Series/Index, lower is expected to be
a scalar or an array of size 1.
upper : scalar or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it. If it is None,
there will be no clipping based on upper.
In case of Series, upper is expected to be
a scalar or an array of size 1.
inplace : bool, default False
Returns
-------
Clipped DataFrame/Series/Index/MultiIndex
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"a":[1, 2, 3, 4], "b":['a', 'b', 'c', 'd']})
>>> df.clip(lower=[2, 'b'], upper=[3, 'c'])
a b
0 2 b
1 2 b
2 3 c
3 3 c
>>> df.clip(lower=None, upper=[3, 'c'])
a b
0 1 a
1 2 b
2 3 c
3 3 c
>>> df.clip(lower=[2, 'b'], upper=None)
a b
0 2 b
1 2 b
2 3 c
3 4 d
>>> df.clip(lower=2, upper=3, inplace=True)
>>> df
a b
0 2 2
1 2 3
2 3 3
3 3 3
>>> import cudf
>>> sr = cudf.Series([1, 2, 3, 4])
>>> sr.clip(lower=2, upper=3)
0 2
1 2
2 3
3 3
dtype: int64
>>> sr.clip(lower=None, upper=3)
0 1
1 2
2 3
3 3
dtype: int64
>>> sr.clip(lower=2, upper=None, inplace=True)
>>> sr
0 2
1 2
2 3
3 4
dtype: int64
"""
if axis != 1:
raise NotImplementedError("`axis is not yet supported in clip`")
if lower is None and upper is None:
return None if inplace is True else self.copy(deep=True)
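# broadcast scalar thresholds so there is one lower/upper bound per column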
if is_scalar(lower):
lower = np.full(self._num_columns, lower)
if is_scalar(upper):
upper = np.full(self._num_columns, upper)
if len(lower) != len(upper):
raise ValueError("Length of lower and upper should be equal")
if len(lower) != self._num_columns:
raise ValueError(
"Length of lower/upper should be equal to number of columns"
)
if self.ndim == 1:
# In case of series and Index,
# swap lower and upper if lower > upper
if (
lower[0] is not None
and upper[0] is not None
and (lower[0] > upper[0])
):
lower[0], upper[0] = upper[0], lower[0]
data = {
name: col.clip(lower[i], upper[i])
for i, (name, col) in enumerate(self._data.items())
}
output = self._from_data(data, self._index)
output._copy_type_metadata(self, include_index=False)
return self._mimic_inplace(output, inplace=inplace)
def _copy_type_metadata(
self,
other: Self,
include_index: bool = True,
*,
override_dtypes: Optional[abc.Iterable[Optional[Dtype]]] = None,
) -> Self:
"""
Copy type metadata from each column of `other` to the corresponding
column of `self`.
See `ColumnBase._with_type_metadata` for more information.
"""
super()._copy_type_metadata(other, override_dtypes=override_dtypes)
if (
include_index
and self._index is not None
and other._index is not None
):
self._index._copy_type_metadata(other._index)
# When other._index is a CategoricalIndex, the current index
# will be a NumericalIndex with an underlying CategoricalColumn
# (the above _copy_type_metadata call will have converted the
# column). Calling cudf.Index on that column generates the
# appropriate index.
if isinstance(
other._index, cudf.core.index.CategoricalIndex
) and not isinstance(
self._index, cudf.core.index.CategoricalIndex
):
self._index = cudf.Index(
cast(cudf.core.index.NumericIndex, self._index)._column,
name=self._index.name,
)
elif isinstance(other._index, cudf.MultiIndex) and not isinstance(
self._index, cudf.MultiIndex
):
self._index = cudf.MultiIndex._from_data(
self._index._data, name=self._index.name
)
return self
@_cudf_nvtx_annotate
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction=None,
limit_area=None,
downcast=None,
**kwargs,
):
"""
Interpolate data values between some points.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. Currently,
only 'linear' is supported.
* 'linear': Ignore the index and treat the values as
equally spaced. This is the only method supported on MultiIndexes.
* 'index', 'values': linearly interpolate using the index as
an x-axis. Unsorted indices can lead to erroneous results.
axis : int, default 0
Axis to interpolate along. Currently,
only 'axis=0' is supported.
inplace : bool, default False
Update the data in place if possible.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values
"""
if method in {"pad", "ffill"} and limit_direction != "forward":
raise ValueError(
f"`limit_direction` must be 'forward' for method `{method}`"
)
if method in {"backfill", "bfill"} and limit_direction != "backward":
raise ValueError(
f"`limit_direction` must be 'backward' for method `{method}`"
)
data = self
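# interpolation assumes rows are in index order; for a non-RangeIndex,
# gather the rows into sorted-index order first and restore the original
# ordering after interpolating (via the argsort of ``perm_sort`` below)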
if not isinstance(data._index, cudf.RangeIndex):
perm_sort = data._index.argsort()
data = data._gather(
GatherMap.from_column_unchecked(
cudf.core.column.as_column(perm_sort),
len(data),
nullify=False,
)
)
interpolator = cudf.core.algorithms.get_column_interpolator(method)
columns = {}
for colname, col in data._data.items():
if col.nullable:
col = col.astype("float64").fillna(np.nan)
# Interpolation methods may or may not need the index
columns[colname] = interpolator(col, index=data._index)
result = self._from_data(columns, index=data._index)
return (
result
if isinstance(data._index, cudf.RangeIndex)
# TODO: This should be a scatter, avoiding an argsort.
else result._gather(
GatherMap.from_column_unchecked(
cudf.core.column.as_column(perm_sort.argsort()),
len(result),
nullify=False,
)
)
)
@_cudf_nvtx_annotate
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""Shift values by `periods` positions."""
axis = self._get_axis_from_axis_arg(axis)
if axis != 0:
raise ValueError("Only axis=0 is supported.")
if freq is not None:
raise ValueError("The freq argument is not yet supported.")
data_columns = (
col.shift(periods, fill_value) for col in self._columns
)
return self.__class__._from_data(
zip(self._column_names, data_columns), self._index
)
@_cudf_nvtx_annotate
def truncate(self, before=None, after=None, axis=0, copy=True):
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, str, int
Truncate all rows before this index value.
after : date, str, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default is True,
Return a copy of the truncated section.
Returns
-------
The truncated Series or DataFrame.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
.. pandas-compat::
**DataFrame.truncate, Series.truncate**
The ``copy`` parameter is only present for API compatibility, but
``copy=False`` is not supported. This method always generates a
copy.
Examples
--------
**Series**
>>> import cudf
>>> cs1 = cudf.Series([1, 2, 3, 4])
>>> cs1
0 1
1 2
2 3
3 4
dtype: int64
>>> cs1.truncate(before=1, after=2)
1 2
2 3
dtype: int64
>>> import cudf
>>> dates = cudf.date_range(
... '2021-01-01 23:45:00', '2021-01-01 23:46:00', freq='s'
... )
>>> cs2 = cudf.Series(range(len(dates)), index=dates)
>>> cs2
2021-01-01 23:45:00 0
2021-01-01 23:45:01 1
2021-01-01 23:45:02 2
2021-01-01 23:45:03 3
2021-01-01 23:45:04 4
2021-01-01 23:45:05 5
2021-01-01 23:45:06 6
2021-01-01 23:45:07 7
2021-01-01 23:45:08 8
2021-01-01 23:45:09 9
2021-01-01 23:45:10 10
2021-01-01 23:45:11 11
2021-01-01 23:45:12 12
2021-01-01 23:45:13 13
2021-01-01 23:45:14 14
2021-01-01 23:45:15 15
2021-01-01 23:45:16 16
2021-01-01 23:45:17 17
2021-01-01 23:45:18 18
2021-01-01 23:45:19 19
2021-01-01 23:45:20 20
2021-01-01 23:45:21 21
2021-01-01 23:45:22 22
2021-01-01 23:45:23 23
2021-01-01 23:45:24 24
...
2021-01-01 23:45:56 56
2021-01-01 23:45:57 57
2021-01-01 23:45:58 58
2021-01-01 23:45:59 59
dtype: int64
>>> cs2.truncate(
... before="2021-01-01 23:45:18", after="2021-01-01 23:45:27"
... )
2021-01-01 23:45:18 18
2021-01-01 23:45:19 19
2021-01-01 23:45:20 20
2021-01-01 23:45:21 21
2021-01-01 23:45:22 22
2021-01-01 23:45:23 23
2021-01-01 23:45:24 24
2021-01-01 23:45:25 25
2021-01-01 23:45:26 26
2021-01-01 23:45:27 27
dtype: int64
>>> cs3 = cudf.Series({'A': 1, 'B': 2, 'C': 3, 'D': 4})
>>> cs3
A 1
B 2
C 3
D 4
dtype: int64
>>> cs3.truncate(before='B', after='C')
B 2
C 3
dtype: int64
**DataFrame**
>>> df = cudf.DataFrame({
... 'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']
... }, index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
>>> import cudf
>>> dates = cudf.date_range(
... '2021-01-01 23:45:00', '2021-01-01 23:46:00', freq='s'
... )
>>> df2 = cudf.DataFrame(data={'A': 1, 'B': 2}, index=dates)
>>> df2.head()
A B
2021-01-01 23:45:00 1 2
2021-01-01 23:45:01 1 2
2021-01-01 23:45:02 1 2
2021-01-01 23:45:03 1 2
2021-01-01 23:45:04 1 2
>>> df2.truncate(
... before="2021-01-01 23:45:18", after="2021-01-01 23:45:27"
... )
A B
2021-01-01 23:45:18 1 2
2021-01-01 23:45:19 1 2
2021-01-01 23:45:20 1 2
2021-01-01 23:45:21 1 2
2021-01-01 23:45:22 1 2
2021-01-01 23:45:23 1 2
2021-01-01 23:45:24 1 2
2021-01-01 23:45:25 1 2
2021-01-01 23:45:26 1 2
2021-01-01 23:45:27 1 2
"""
if not copy:
raise ValueError("Truncating with copy=False is not supported.")
axis = self._get_axis_from_axis_arg(axis)
ax = self._index if axis == 0 else self._data.to_pandas_index()
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
if type(ax) is cudf.core.index.DatetimeIndex:
before = pd.to_datetime(before)
after = pd.to_datetime(after)
if before is not None and after is not None and before > after:
raise ValueError(f"Truncate: {after} must be after {before}")
if len(ax) > 1 and ax.is_monotonic_decreasing and ax.nunique() > 1:
before, after = after, before
slicer = [slice(None, None)] * self.ndim
slicer[axis] = slice(before, after)
return self.loc[tuple(slicer)].copy()
@cached_property
def loc(self):
"""Select rows and columns by label or boolean mask.
Examples
--------
**Series**
>>> import cudf
>>> series = cudf.Series([10, 11, 12], index=['a', 'b', 'c'])
>>> series
a 10
b 11
c 12
dtype: int64
>>> series.loc['b']
11
**DataFrame**
DataFrame with string index.
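>>> df = cudf.DataFrame({'a': [0, 1, 2, 3, 4], 'b': [5, 6, 7, 8, 9]},
...                     index=['a', 'b', 'c', 'd', 'e'])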
>>> df
a b
a 0 5
b 1 6
c 2 7
d 3 8
e 4 9
Select a single row by label.
>>> df.loc['a']
a 0
b 5
Name: a, dtype: int64
Select multiple rows and a single column.
>>> df.loc[['a', 'c', 'e'], 'b']
a 5
c 7
e 9
Name: b, dtype: int64
Selection by boolean mask.
>>> df.loc[df.a > 2]
a b
d 3 8
e 4 9
Setting values using loc.
>>> df.loc[['a', 'c', 'e'], 'a'] = 0
>>> df
a b
a 0 5
b 1 6
c 0 7
d 3 8
e 0 9
"""
return self._loc_indexer_type(self)
@cached_property
def iloc(self):
"""Select values by position.
Examples
--------
**Series**
>>> import cudf
>>> s = cudf.Series([10, 20, 30])
>>> s
0 10
1 20
2 30
dtype: int64
>>> s.iloc[2]
30
**DataFrame**
Selecting rows and column by position.
>>> df = cudf.DataFrame({'a': range(20),
... 'b': range(20),
... 'c': range(20)})
Select a single row using an integer index.
>>> df.iloc[1]
a 1
b 1
c 1
Name: 1, dtype: int64
Select multiple rows using a list of integers.
>>> df.iloc[[0, 2, 9, 18]]
a b c
0 0 0 0
2 2 2 2
9 9 9 9
18 18 18 18
Select rows using a slice.
>>> df.iloc[3:10:2]
a b c
3 3 3 3
5 5 5 5
7 7 7 7
9 9 9 9
Select both rows and columns.
>>> df.iloc[[1, 3, 5, 7], 2]
1 1
3 3
5 5
7 7
Name: c, dtype: int64
Setting values in a column using iloc.
>>> df.iloc[:4] = 0
>>> df
a b c
0 0 0 0
1 0 0 0
2 0 0 0
3 0 0 0
4 4 4 4
5 5 5 5
6 6 6 6
7 7 7 7
8 8 8 8
9 9 9 9
[10 more rows]
"""
return self._iloc_indexer_type(self)
@_cudf_nvtx_annotate
def scale(self):
"""
Scale values to [0, 1] in float64
Returns
-------
DataFrame or Series
Values scaled to [0, 1].
Examples
--------
>>> import cudf
>>> series = cudf.Series([10, 11, 12, 0.5, 1])
>>> series
0 10.0
1 11.0
2 12.0
3 0.5
4 1.0
dtype: float64
>>> series.scale()
0 0.826087
1 0.913043
2 1.000000
3 0.000000
4 0.043478
dtype: float64
"""
vmin = self.min()
vmax = self.max()
scaled = (self - vmin) / (vmax - vmin)
scaled._index = self._index.copy(deep=False)
return scaled
@_cudf_nvtx_annotate
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind=None,
na_position="last",
sort_remaining=True,
ignore_index=False,
key=None,
):
"""Sort object by labels (along an axis).
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
This is only useful in the case of MultiIndex.
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : sorting method such as 'quicksort'.
Not yet supported.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if first; last puts NaNs at the end.
sort_remaining : bool, default True
When sorting a multiindex on a subset of its levels,
should entries be lexsorted by the remaining
(non-specified) levels as well?
ignore_index : bool, default False
if True, index will be replaced with RangeIndex.
key : callable, optional
If not None, apply the key function to the index values before
sorting. This is similar to the key argument in the builtin
sorted() function, with the notable difference that this key
function should be vectorized. It should expect an Index and return
an Index of the same shape. For MultiIndex inputs, the key is
applied per level.
Returns
-------
Frame or None
Notes
-----
Difference from pandas:
* Not supporting: kind, sort_remaining=False
Examples
--------
**Series**
>>> import cudf
>>> series = cudf.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4])
>>> series
3 a
2 b
1 c
4 d
dtype: object
>>> series.sort_index()
1 c
2 b
3 a
4 d
dtype: object
Sort Descending
>>> series.sort_index(ascending=False)
4 d
3 a
2 b
1 c
dtype: object
**DataFrame**
>>> df = cudf.DataFrame(
... {"b":[3, 2, 1], "a":[2, 1, 3]}, index=[1, 3, 2])
>>> df.sort_index(axis=0)
b a
1 3 2
2 1 3
3 2 1
>>> df.sort_index(axis=1)
a b
1 2 3
3 1 2
2 3 1
"""
if kind is not None:
raise NotImplementedError("kind is not yet supported")
if key is not None:
raise NotImplementedError("key is not yet supported.")
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
if axis in (0, "index"):
idx = self.index
if isinstance(idx, MultiIndex):
if level is not None:
# Pandas doesn't handle na_position in case of MultiIndex.
na_position = "first" if ascending is True else "last"
if not is_list_like(level):
level = [level]
by = list(map(idx._get_level_label, level))
if sort_remaining:
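# also sort by any index levels not explicitly requested, keeping their
# original level order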
handled = set(by)
by.extend(
filter(
lambda n: n not in handled,
self.index._data.names,
)
)
else:
by = list(idx._data.names)
inds = idx._get_sorted_inds(
by=by, ascending=ascending, na_position=na_position
)
out = self._gather(
GatherMap.from_column_unchecked(
inds, len(self), nullify=False
)
)
# TODO: frame factory function should handle multilevel column
# names
if (
isinstance(self, cudf.core.dataframe.DataFrame)
and self._data.multiindex
):
out._set_column_names_like(self)
elif (ascending and idx.is_monotonic_increasing) or (
not ascending and idx.is_monotonic_decreasing
):
out = self.copy()
else:
inds = idx.argsort(
ascending=ascending, na_position=na_position
)
out = self._gather(
GatherMap.from_column_unchecked(
cudf.core.column.as_column(inds),
len(self),
nullify=False,
)
)
if (
isinstance(self, cudf.core.dataframe.DataFrame)
and self._data.multiindex
):
out._set_column_names_like(self)
else:
labels = sorted(self._data.names, reverse=not ascending)
out = self[labels]
if ignore_index is True:
out = out.reset_index(drop=True)
return self._mimic_inplace(out, inplace=inplace)
def memory_usage(self, index=True, deep=False):
"""Return the memory usage of an object.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the index.
deep : bool, default False
The deep parameter is ignored and is only included for pandas
compatibility.
Returns
-------
Series or scalar
For DataFrame, a Series whose index is the original column names
and whose values is the memory usage of each column in bytes. For a
Series the total memory usage.
Examples
--------
**DataFrame**
>>> dtypes = ['int64', 'float64', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = cudf.DataFrame(data)
>>> df.head()
int64 float64 object bool
0 1 1.0 1.0 True
1 1 1.0 1.0 True
2 1 1.0 1.0 True
3 1 1.0 1.0 True
4 1 1.0 1.0 True
>>> df.memory_usage(index=False)
int64 40000
float64 40000
object 40000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5008
**Series**
>>> s = cudf.Series(range(3), index=['a','b','c'])
>>> s.memory_usage()
43
Not including the index gives the size of the rest of the data, which
is necessarily smaller:
>>> s.memory_usage(index=False)
24
"""
raise NotImplementedError
def hash_values(self, method="murmur3", seed=None):
"""Compute the hash of values in this column.
Parameters
----------
method : {'murmur3', 'md5'}, default 'murmur3'
Hash function to use:
* murmur3: MurmurHash3 hash function.
* md5: MD5 hash function.
seed : int, optional
Seed value to use for the hash function.
Note - This only has effect for the following supported
hash functions:
* murmur3: MurmurHash3 hash function.
Returns
-------
Series
A Series with hash values.
Examples
--------
**Series**
>>> import cudf
>>> series = cudf.Series([10, 120, 30])
>>> series
0 10
1 120
2 30
dtype: int64
>>> series.hash_values(method="murmur3")
0 -1930516747
1 422619251
2 -941520876
dtype: int32
>>> series.hash_values(method="md5")
0 7be4bbacbfdb05fb3044e36c22b41e8b
1 947ca8d2c5f0f27437f156cfbfab0969
2 d0580ef52d27c043c8e341fd5039b166
dtype: object
>>> series.hash_values(method="murmur3", seed=42)
0 2364453205
1 422621911
2 3353449140
dtype: uint32
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame({"a": [10, 120, 30], "b": [0.0, 0.25, 0.50]})
>>> df
a b
0 10 0.00
1 120 0.25
2 30 0.50
>>> df.hash_values(method="murmur3")
0 -330519225
1 -397962448
2 -1345834934
dtype: int32
>>> df.hash_values(method="md5")
0 57ce879751b5169c525907d5c563fae1
1 948d6221a7c4963d4be411bcead7e32b
2 fe061786ea286a515b772d91b0dfcd70
dtype: object
"""
seed_hash_methods = {"murmur3"}
if seed is None:
seed = 0
elif method not in seed_hash_methods:
warnings.warn(
"Provided seed value has no effect for hash method"
f" `{method}`. Refer to the docstring for information"
" on hash methods that support the `seed` param"
)
# Note that both Series and DataFrame return Series objects from this
# calculation, necessitating the unfortunate circular reference to the
# child class here.
return cudf.Series._from_data(
{None: libcudf.hash.hash([*self._columns], method, seed)},
index=self.index,
)
def _gather(
self,
gather_map: GatherMap,
keep_index=True,
):
"""Gather rows of frame specified by indices in `gather_map`.
Maintain the index if keep_index is True.
This function does no expensive bounds checking, but does
check that the number of rows of self matches the validated
number of rows.
"""
if not gather_map.nullify and len(self) != gather_map.nrows:
raise IndexError("Gather map is out of bounds")
return self._from_columns_like_self(
libcudf.copying.gather(
list(self._index._columns + self._columns)
if keep_index
else list(self._columns),
gather_map.column,
nullify=gather_map.nullify,
),
self._column_names,
self._index.names if keep_index else None,
)
def _slice(self, arg: slice, keep_index: bool = True) -> Self:
"""Slice a frame.
Parameters
----------
arg
The slice
keep_index
Preserve the index when slicing?
Returns
-------
Sliced frame
Notes
-----
This slicing has normal python semantics.
"""
num_rows = len(self)
if num_rows == 0:
return self
start, stop, stride = arg.indices(num_rows)
index = self.index
has_range_index = isinstance(index, RangeIndex)
if len(range(start, stop, stride)) == 0:
# Avoid materialising the range index column
result = self._empty_like(
keep_index=keep_index and not has_range_index
)
if keep_index and has_range_index:
lo = index.start + start * index.step
hi = index.start + stop * index.step
step = index.step * stride
result.index = RangeIndex(
start=lo, stop=hi, step=step, name=index.name
)
return result
if start < 0:
start = start + num_rows
# At this point, we have converted slice arguments into
# indices that no longer wrap around.
# For example slice(4, None, -1) will produce the
# start, stop, stride tuple (4, -1, -1)
# This check makes sure -1 is not wrapped (again) to
# produce -1 + num_rows.
if stop < 0 and not (stride < 0 and stop == -1):
stop = stop + num_rows
stride = 1 if stride is None else stride
if (stop - start) * stride <= 0:
return self._empty_like(keep_index=True)
start = min(start, num_rows)
stop = min(stop, num_rows)
if stride != 1:
return self._gather(
GatherMap.from_column_unchecked(
cudf.core.column.arange(
start,
stop=stop,
step=stride,
dtype=libcudf.types.size_type_dtype,
),
len(self),
nullify=False,
),
keep_index=keep_index,
)
columns_to_slice = [
*(
self._index._data.columns
if keep_index and not has_range_index
else []
),
*self._columns,
]
result = self._from_columns_like_self(
libcudf.copying.columns_slice(columns_to_slice, [start, stop])[0],
self._column_names,
None if has_range_index or not keep_index else self._index.names,
)
if keep_index and has_range_index:
result.index = self.index[start:stop]
return result
def _positions_from_column_names(
self, column_names, offset_by_index_columns=False
):
"""Map each column name into their positions in the frame.
Return positions of the provided column names, offset by the number of
index columns if `offset_by_index_columns` is True. The order of
indices returned corresponds to the column order in this Frame.
"""
num_index_columns = (
len(self._index._data) if offset_by_index_columns else 0
)
return [
i + num_index_columns
for i, name in enumerate(self._column_names)
if name in set(column_names)
]
def drop_duplicates(
self,
subset=None,
keep="first",
nulls_are_equal=True,
ignore_index=False,
):
"""
Drop duplicate rows in frame.
subset : list, optional
List of columns to consider when dropping rows.
keep : ["first", "last", False]
"first" will keep the first duplicate entry, "last" will keep the
last duplicate entry, and False will drop all duplicates.
nulls_are_equal: bool, default True
Null elements are considered equal to other null elements.
ignore_index: bool, default False
If True, the resulting axis will be labeled 0, 1, ..., n - 1.
"""
if not isinstance(ignore_index, (np.bool_, bool)):
raise ValueError(
f"{ignore_index=} must be bool, "
f"not {type(ignore_index).__name__}"
)
subset = self._preprocess_subset(subset)
subset_cols = [name for name in self._column_names if name in subset]
if len(subset_cols) == 0:
return self.copy(deep=True)
keys = self._positions_from_column_names(
subset, offset_by_index_columns=not ignore_index
)
return self._from_columns_like_self(
libcudf.stream_compaction.drop_duplicates(
list(self._columns)
if ignore_index
else list(self._index._columns + self._columns),
keys=keys,
keep=keep,
nulls_are_equal=nulls_are_equal,
),
self._column_names,
self._index.names if not ignore_index else None,
)
@_cudf_nvtx_annotate
def duplicated(self, subset=None, keep="first"):
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``'first'`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``'last'`` : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series
Boolean series indicating duplicated rows.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider a dataset containing ramen product ratings.
>>> import cudf
>>> df = cudf.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Maggie', 'Maggie', 'Maggie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Maggie cup 3.5
3 Maggie pack 15.0
4 Maggie pack 5.0
By default, for each set of duplicated values, the first occurrence
is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
By setting ``keep`` to False, all duplicates are True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
subset = self._preprocess_subset(subset)
if isinstance(self, cudf.Series):
df = self.to_frame(name="None")
subset = ["None"]
else:
df = self.copy(deep=False)
df._data["index"] = cudf.core.column.arange(
0, len(self), dtype=size_type_dtype
)
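# drop_duplicates keeps one representative row per duplicate group; merging
# back on all columns recovers the original positions of those rows so they
# can be marked as not duplicated below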
new_df = df.drop_duplicates(subset=subset, keep=keep)
idx = df.merge(new_df, how="inner")["index"]
s = cudf.Series._from_data(
{
None: cudf.core.column.full(
size=len(self), fill_value=True, dtype="bool"
)
},
index=self.index,
)
s.iloc[idx] = False
return s
@_cudf_nvtx_annotate
def _empty_like(self, keep_index=True) -> Self:
return self._from_columns_like_self(
libcudf.copying.columns_empty_like(
[
*(self._index._data.columns if keep_index else ()),
*self._columns,
]
),
self._column_names,
self._index.names if keep_index else None,
)
def _split(self, splits, keep_index=True):
if self._num_rows == 0:
return []
columns_split = libcudf.copying.columns_split(
[
*(self._index._data.columns if keep_index else []),
*self._columns,
],
splits,
)
return [
self._from_columns_like_self(
columns_split[i],
self._column_names,
self._index.names if keep_index else None,
)
for i in range(len(splits) + 1)
]
@_cudf_nvtx_annotate
def fillna(
self, value=None, method=None, axis=None, inplace=False, limit=None
): # noqa: D102
old_index = self._index
ret = super().fillna(value, method, axis, inplace, limit)
if inplace:
self._index = old_index
else:
ret._index = old_index
return ret
@_cudf_nvtx_annotate
def bfill(self, value=None, axis=None, inplace=None, limit=None):
"""
Synonym for :meth:`Series.fillna` with ``method='bfill'``.
Returns
-------
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="bfill",
value=value,
axis=axis,
inplace=inplace,
limit=limit,
)
@_cudf_nvtx_annotate
def backfill(self, value=None, axis=None, inplace=None, limit=None):
"""
Synonym for :meth:`Series.fillna` with ``method='bfill'``.
.. deprecated:: 23.06
Use `DataFrame.bfill/Series.bfill` instead.
Returns
-------
Object with missing values filled or None if ``inplace=True``.
"""
# Do not remove until pandas removes this.
warnings.warn(
"DataFrame.backfill/Series.backfill is deprecated. Use "
"DataFrame.bfill/Series.bfill instead",
FutureWarning,
)
return self.bfill(value=value, axis=axis, inplace=inplace, limit=limit)
@_cudf_nvtx_annotate
def ffill(self, value=None, axis=None, inplace=None, limit=None):
"""
Synonym for :meth:`Series.fillna` with ``method='ffill'``.
Returns
-------
Object with missing values filled or None if ``inplace=True``.
"""
return self.fillna(
method="ffill",
value=value,
axis=axis,
inplace=inplace,
limit=limit,
)
@_cudf_nvtx_annotate
def pad(self, value=None, axis=None, inplace=None, limit=None):
"""
Synonym for :meth:`Series.fillna` with ``method='ffill'``.
.. deprecated:: 23.06
Use `DataFrame.ffill/Series.ffill` instead.
Returns
-------
Object with missing values filled or None if ``inplace=True``.
"""
# Do not remove until pandas removes this.
warnings.warn(
"DataFrame.pad/Series.pad is deprecated. Use "
"DataFrame.ffill/Series.ffill instead",
FutureWarning,
)
return self.ffill(value=value, axis=axis, inplace=inplace, limit=limit)
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series with updated labels or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string 'suffix'.
DataFrame.add_suffix: Suffix column labels with string 'suffix'.
Examples
--------
**Series**
>>> s = cudf.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
**DataFrame**
>>> df = cudf.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
raise NotImplementedError(
"`IndexedFrame.add_prefix` not currently implemented. \
Use `Series.add_prefix` or `DataFrame.add_prefix`"
)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series with updated labels or DataFrame with updated labels.
See Also
--------
Series.add_prefix: prefix row labels with string 'prefix'.
DataFrame.add_prefix: Prefix column labels with string 'prefix'.
Examples
--------
**Series**
>>> s = cudf.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
**DataFrame**
>>> df = cudf.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
raise NotImplementedError
@acquire_spill_lock()
@_cudf_nvtx_annotate
def _apply(self, func, kernel_getter, *args, **kwargs):
"""Apply `func` across the rows of the frame."""
if kwargs:
raise ValueError("UDFs using **kwargs are not yet supported.")
try:
kernel, retty = _compile_or_get(
self, func, args, kernel_getter=kernel_getter
)
except Exception as e:
raise ValueError(
"user defined function compilation failed."
) from e
# Mask and data column preallocated
ans_col = _return_arr_from_dtype(retty, len(self))
ans_mask = cudf.core.column.full(
size=len(self), fill_value=True, dtype="bool"
)
output_args = [(ans_col, ans_mask), len(self)]
input_args = _get_input_args_from_frame(self)
launch_args = output_args + input_args + list(args)
try:
with _CUDFNumbaConfig():
kernel.forall(len(self))(*launch_args)
except Exception as e:
raise RuntimeError("UDF kernel execution failed.") from e
col = _post_process_output_col(ans_col, retty)
col.set_base_mask(libcudf.transform.bools_to_mask(ans_mask))
result = cudf.Series._from_data({None: col}, self._index)
return result
def sort_values(
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
):
"""Sort by the values along either axis.
Parameters
----------
by : str or list of str
Name or list of names to sort by.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of the
by.
na_position : {'first', 'last'}, default 'last'
'first' puts nulls at the beginning, 'last' puts nulls at the end
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, ..., n - 1.
Returns
-------
Frame : Frame with sorted values.
Notes
-----
Difference from pandas:
* Support axis='index' only.
* Not supporting: inplace, kind
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['a'] = [0, 1, 2]
>>> df['b'] = [-3, 2, 0]
>>> df.sort_values('b')
a b
0 0 -3
2 2 0
1 1 2
"""
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
if inplace:
raise NotImplementedError("`inplace` not currently implemented.")
if kind != "quicksort":
if kind not in {"mergesort", "heapsort", "stable"}:
raise AttributeError(
f"{kind} is not a valid sorting algorithm for "
f"'DataFrame' object"
)
warnings.warn(
f"GPU-accelerated {kind} is currently not supported, "
f"defaulting to quicksort."
)
if axis != 0:
raise NotImplementedError("`axis` not currently implemented.")
if len(self) == 0:
return self
# argsort the `by` column
out = self._gather(
GatherMap.from_column_unchecked(
self._get_columns_by_label(by)._get_sorted_inds(
ascending=ascending, na_position=na_position
),
len(self),
nullify=False,
),
keep_index=not ignore_index,
)
if (
isinstance(self, cudf.core.dataframe.DataFrame)
and self._data.multiindex
):
out.columns = self._data.to_pandas_index()
return out
def _n_largest_or_smallest(self, largest, n, columns, keep):
# Get column to operate on
if isinstance(columns, str):
columns = [columns]
method = "nlargest" if largest else "nsmallest"
for col in columns:
if isinstance(self._data[col], cudf.core.column.StringColumn):
if isinstance(self, cudf.DataFrame):
error_msg = (
f"Column '{col}' has dtype {self._data[col].dtype}, "
f"cannot use method '{method}' with this dtype"
)
else:
error_msg = (
f"Cannot use method '{method}' with "
f"dtype {self._data[col].dtype}"
)
raise TypeError(error_msg)
if len(self) == 0:
return self
if keep == "first":
if n < 0:
n = 0
# argsort the `by` column
return self._gather(
GatherMap.from_column_unchecked(
self._get_columns_by_label(columns)
._get_sorted_inds(ascending=not largest)
.slice(*slice(None, n).indices(len(self))),
len(self),
nullify=False,
),
keep_index=True,
)
elif keep == "last":
indices = self._get_columns_by_label(columns)._get_sorted_inds(
ascending=largest
)
if n <= 0:
# Empty slice.
indices = indices.slice(0, 0)
else:
indices = indices.slice(
*slice(None, -n - 1, -1).indices(len(self))
)
return self._gather(
GatherMap.from_column_unchecked(
indices, len(self), nullify=False
),
keep_index=True,
)
else:
raise ValueError('keep must be either "first" or "last"')
def _align_to_index(
self,
index: ColumnLike,
how: str = "outer",
sort: bool = True,
allow_non_unique: bool = False,
) -> Self:
index = cudf.core.index.as_index(index)
if self.index.equals(index):
return self
if not allow_non_unique:
if not self.index.is_unique or not index.is_unique:
raise ValueError("Cannot align indices with non-unique values")
lhs = cudf.DataFrame._from_data(self._data, index=self.index)
rhs = cudf.DataFrame._from_data({}, index=index)
# create a temporary column that we will later sort by
# to recover ordering after index alignment.
sort_col_id = str(uuid4())
if how == "left":
lhs[sort_col_id] = cudf.core.column.arange(len(lhs))
elif how == "right":
rhs[sort_col_id] = cudf.core.column.arange(len(rhs))
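# Left/right joins may reorder rows; sorting on the temporary column
# below restores the original ordering of the kept side.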
result = lhs.join(rhs, how=how, sort=sort)
if how in ("left", "right"):
result = result.sort_values(sort_col_id)
del result[sort_col_id]
result = self.__class__._from_data(
data=result._data, index=result.index
)
result._data.multiindex = self._data.multiindex
result._data._level_names = self._data._level_names
result.index.names = self.index.names
return result
@_cudf_nvtx_annotate
def _reindex(
self,
column_names,
dtypes=None,
deep=False,
index=None,
inplace=False,
fill_value=NA,
):
"""
Helper for `.reindex`
Parameters
----------
column_names : array-like
The list of columns to select from the Frame;
if ``column_names`` is a superset of ``Frame.columns``, new
columns are created.
dtypes : dict
Mapping of dtypes for the empty columns being created.
deep : boolean, optional, default False
Whether to make deep copy or shallow copy of the columns.
index : Index or array-like, default None
The ``index`` to be used to reindex the Frame with.
inplace : bool, default False
Whether to perform the operation in place on the data.
fill_value : value with which to replace nulls in the result
Returns
-------
Series or DataFrame
"""
if dtypes is None:
dtypes = {}
df = self
if index is not None:
if not df._index.is_unique:
raise ValueError(
"cannot reindex on an axis with duplicate labels"
)
index = cudf.core.index.as_index(
index, name=getattr(index, "name", self._index.name)
)
idx_dtype_match = (df.index.nlevels == index.nlevels) and all(
_is_same_dtype(left_dtype, right_dtype)
for left_dtype, right_dtype in zip(
(col.dtype for col in df.index._data.columns),
(col.dtype for col in index._data.columns),
)
)
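# If the existing index and the target index differ in level count or
# dtype, no labels can match: start from an empty frame and just build
# the requested columns.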
if not idx_dtype_match:
column_names = (
column_names
if column_names is not None
else list(df._column_names)
)
df = cudf.DataFrame()
else:
lhs = cudf.DataFrame._from_data({}, index=index)
rhs = cudf.DataFrame._from_data(
{
# bookkeeping workaround for unnamed series
(name or 0)
if isinstance(self, cudf.Series)
else name: col
for name, col in df._data.items()
},
index=df._index,
)
df = lhs.join(rhs, how="left", sort=True)
# double-argsort to map back from sorted to unsorted positions
df = df.take(index.argsort(ascending=True).argsort())
index = index if index is not None else df.index
names = (
column_names if column_names is not None else list(df._data.names)
)
cols = {
name: (
df._data[name].copy(deep=deep)
if name in df._data
else cudf.core.column.column.column_empty(
dtype=dtypes.get(name, np.float64),
masked=True,
row_count=len(index),
)
)
for name in names
}
result = self.__class__._from_data(
data=cudf.core.column_accessor.ColumnAccessor(
cols,
multiindex=self._data.multiindex,
level_names=tuple(column_names.names)
if isinstance(column_names, pd.Index)
else None,
),
index=index,
)
result.fillna(fill_value, inplace=True)
return self._mimic_inplace(result, inplace=inplace)
def round(self, decimals=0, how="half_even"):
"""
Round to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. This parameter
must be an int for a Series. For a DataFrame, a dict or a Series
are also valid inputs. If an int is given, round each column to the
same number of places. Otherwise dict and Series round to variable
numbers of places. Column names should be in the keys if
`decimals` is a dict-like, or in the index if `decimals` is a
Series. Any columns not included in `decimals` will be left as is.
Elements of `decimals` which are not columns of the input will be
ignored.
how : str, optional
Type of rounding. Can be either "half_even" (default)
or "half_up" rounding.
Returns
-------
Series or DataFrame
A Series or DataFrame with the affected columns rounded to the
specified number of decimal places.
Examples
--------
**Series**
>>> s = cudf.Series([0.1, 1.4, 2.9])
>>> s.round()
0 0.0
1 1.0
2 3.0
dtype: float64
**DataFrame**
>>> df = cudf.DataFrame(
... [(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'],
... )
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places.
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as keys and the number of decimal
places as values.
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as the index and the number of
decimal places as the values.
>>> decimals = cudf.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
if isinstance(decimals, cudf.Series):
decimals = decimals.to_pandas()
if isinstance(decimals, pd.Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
decimals = decimals.to_dict()
elif isinstance(decimals, int):
decimals = {name: decimals for name in self._column_names}
elif not isinstance(decimals, abc.Mapping):
raise TypeError(
"decimals must be an integer, a dict-like or a Series"
)
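# Round only the numeric (non-decimal, non-bool) columns that appear in
# `decimals`; every other column is copied through unchanged.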
cols = {
name: col.round(decimals[name], how=how)
if (
name in decimals
and _is_non_decimal_numeric_dtype(col.dtype)
and not is_bool_dtype(col.dtype)
)
else col.copy(deep=True)
for name, col in self._data.items()
}
return self.__class__._from_data(
data=cudf.core.column_accessor.ColumnAccessor(
cols,
multiindex=self._data.multiindex,
level_names=self._data.level_names,
),
index=self._index,
)
def resample(
self,
rule,
axis=0,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
base=None,
on=None,
level=None,
origin="start_day",
offset=None,
):
"""
Convert the frequency of ("resample") the given time series data.
Parameters
----------
rule: str
The offset string representing the frequency to use.
Note that DateOffset objects are not yet supported.
closed: {"right", "left"}, default None
Which side of bin interval is closed. The default is
"left" for all frequency offsets except for "M" and "W",
which have a default of "right".
label: {"right", "left"}, default None
Which bin edge label to label bucket with. The default is
"left" for all frequency offsets except for "M" and "W",
which have a default of "right".
on: str, optional
For a DataFrame, column to use instead of the index for
resampling. Column must be a datetime-like.
level: str or int, optional
For a MultiIndex, level to use instead of the index for
resampling. The level must be a datetime-like.
Returns
-------
A Resampler object
Examples
--------
First, we create a time series with 1 minute intervals:
>>> index = cudf.date_range(start="2001-01-01", periods=10, freq="1T")
>>> sr = cudf.Series(range(10), index=index)
>>> sr
2001-01-01 00:00:00 0
2001-01-01 00:01:00 1
2001-01-01 00:02:00 2
2001-01-01 00:03:00 3
2001-01-01 00:04:00 4
2001-01-01 00:05:00 5
2001-01-01 00:06:00 6
2001-01-01 00:07:00 7
2001-01-01 00:08:00 8
2001-01-01 00:09:00 9
dtype: int64
Downsampling to 3 minute intervals, followed by a "sum" aggregation:
>>> sr.resample("3T").sum()
2001-01-01 00:00:00 3
2001-01-01 00:03:00 12
2001-01-01 00:06:00 21
2001-01-01 00:09:00 9
dtype: int64
Use the right side of each interval to label the bins:
>>> sr.resample("3T", label="right").sum()
2001-01-01 00:03:00 3
2001-01-01 00:06:00 12
2001-01-01 00:09:00 21
2001-01-01 00:12:00 9
dtype: int64
Close the right side of the interval instead of the left:
>>> sr.resample("3T", closed="right").sum()
2000-12-31 23:57:00 0
2001-01-01 00:00:00 6
2001-01-01 00:03:00 15
2001-01-01 00:06:00 24
dtype: int64
Upsampling to 30 second intervals:
>>> sr.resample("30s").asfreq()[:5] # show the first 5 rows
2001-01-01 00:00:00 0
2001-01-01 00:00:30 <NA>
2001-01-01 00:01:00 1
2001-01-01 00:01:30 <NA>
2001-01-01 00:02:00 2
dtype: int64
Upsample and fill nulls using the "bfill" method:
>>> sr.resample("30s").bfill()[:5]
2001-01-01 00:00:00 0
2001-01-01 00:00:30 1
2001-01-01 00:01:00 1
2001-01-01 00:01:30 2
2001-01-01 00:02:00 2
dtype: int64
Resampling by a specified column of a Dataframe:
>>> df = cudf.DataFrame({
... "price": [10, 11, 9, 13, 14, 18, 17, 19],
... "volume": [50, 60, 40, 100, 50, 100, 40, 50],
... "week_starting": cudf.date_range(
... "2018-01-01", periods=8, freq="7D"
... )
... })
>>> df
price volume week_starting
0 10 50 2018-01-01
1 11 60 2018-01-08
2 9 40 2018-01-15
3 13 100 2018-01-22
4 14 50 2018-01-29
5 18 100 2018-02-05
6 17 40 2018-02-12
7 19 50 2018-02-19
>>> df.resample("M", on="week_starting").mean()
price volume
week_starting
2018-01-31 11.4 60.000000
2018-02-28 18.0 63.333333
Notes
-----
Note that the dtype of the index (or the 'on' column if using
'on=') in the result will be of a frequency closest to the
resampled frequency. For example, if resampling from
nanoseconds to milliseconds, the index will be of dtype
'datetime64[ms]'.
"""
import cudf.core.resample
if (axis, convention, kind, loffset, base, origin, offset) != (
0,
"start",
None,
None,
None,
"start_day",
None,
):
raise NotImplementedError(
"The following arguments are not "
"currently supported by resample:\n\n"
"- axis\n"
"- convention\n"
"- kind\n"
"- loffset\n"
"- base\n"
"- origin\n"
"- offset"
)
by = cudf.Grouper(
key=on, freq=rule, closed=closed, label=label, level=level
)
return (
cudf.core.resample.SeriesResampler(self, by=by)
if isinstance(self, cudf.Series)
else cudf.core.resample.DataFrameResampler(self, by=by)
)
def dropna(
self, axis=0, how="any", thresh=None, subset=None, inplace=False
):
"""
Drop rows (or columns) containing nulls from a Column.
Parameters
----------
axis : {0, 1}, optional
Whether to drop rows (axis=0, default) or columns (axis=1)
containing nulls.
how : {"any", "all"}, optional
Specifies how to decide whether to drop a row (or column).
any (default) drops rows (or columns) containing at least
one null value. all drops only rows (or columns) containing
*all* null values.
thresh: int, optional
If specified, then drops every row (or column) containing
fewer than `thresh` non-null values
subset : list, optional
List of columns to consider when dropping rows (all columns
are considered by default). Alternatively, when dropping
columns, subset is a list of rows to consider.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
Copy of the DataFrame with rows/columns containing nulls dropped.
See Also
--------
cudf.DataFrame.isna
Indicate null values.
cudf.DataFrame.notna
Indicate non-null values.
cudf.DataFrame.fillna
Replace null values.
cudf.Series.dropna
Drop null values.
cudf.Index.dropna
Drop null indices.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": ['Batmobile', None, 'Bullwhip'],
... "born": [np.datetime64("1940-04-25"),
... np.datetime64("NaT"),
... np.datetime64("NaT")]})
>>> df
name toy born
0 Alfred Batmobile 1940-04-25 00:00:00
1 Batman <NA> <NA>
2 Catwoman Bullwhip <NA>
Drop the rows where at least one element is null.
>>> df.dropna()
name toy born
0 Alfred Batmobile 1940-04-25
Drop the columns where at least one element is null.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are null.
>>> df.dropna(how='all')
name toy born
0 Alfred Batmobile 1940-04-25 00:00:00
1 Batman <NA> <NA>
2 Catwoman Bullwhip <NA>
Keep only the rows with at least 2 non-null values.
>>> df.dropna(thresh=2)
name toy born
0 Alfred Batmobile 1940-04-25 00:00:00
2 Catwoman Bullwhip <NA>
Define in which columns to look for null values.
>>> df.dropna(subset=['name', 'born'])
name toy born
0 Alfred Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
0 Alfred Batmobile 1940-04-25
"""
if axis == 0:
result = self._drop_na_rows(how=how, subset=subset, thresh=thresh)
else:
result = self._drop_na_columns(
how=how, subset=subset, thresh=thresh
)
return self._mimic_inplace(result, inplace=inplace)
def _drop_na_rows(self, how="any", subset=None, thresh=None):
"""
Drop null rows from `self`.
how : {"any", "all"}, optional
Specifies how to decide whether to drop a row.
any (default) drops rows containing at least
one null value. all drops only rows containing
*all* null values.
subset : list, optional
List of columns to consider when dropping rows.
thresh : int, optional
If specified, then drops every row containing
fewer than `thresh` non-null values.
"""
subset = self._preprocess_subset(subset)
if len(subset) == 0:
return self.copy(deep=True)
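# Convert NaNs in numerical columns to nulls first so that drop_nulls
# treats them as missing values.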
data_columns = [
col.nans_to_nulls()
if isinstance(col, cudf.core.column.NumericalColumn)
else col
for col in self._columns
]
return self._from_columns_like_self(
libcudf.stream_compaction.drop_nulls(
[*self._index._data.columns, *data_columns],
how=how,
keys=self._positions_from_column_names(
subset, offset_by_index_columns=True
),
thresh=thresh,
),
self._column_names,
self._index.names,
)
def _apply_boolean_mask(self, boolean_mask: BooleanMask, keep_index=True):
"""Apply boolean mask to each row of `self`.
Rows corresponding to `False` are dropped.
If keep_index is False, the index is not preserved.
"""
if len(boolean_mask.column) != len(self):
raise IndexError(
"Boolean mask has wrong length: "
f"{len(boolean_mask.column)} not {len(self)}"
)
return self._from_columns_like_self(
libcudf.stream_compaction.apply_boolean_mask(
list(self._index._columns + self._columns)
if keep_index
else list(self._columns),
boolean_mask.column,
),
column_names=self._column_names,
index_names=self._index.names if keep_index else None,
)
def take(self, indices, axis=0):
"""Return a new frame containing the rows specified by *indices*.
Parameters
----------
indices : array-like
Array of ints indicating which positions to take.
axis : Unsupported
Returns
-------
out : Series or DataFrame
New object with desired subset of rows.
Examples
--------
**Series**
>>> s = cudf.Series(['a', 'b', 'c', 'd', 'e'])
>>> s.take([2, 0, 4, 3])
2 c
0 a
4 e
3 d
dtype: object
**DataFrame**
>>> a = cudf.DataFrame({'a': [1.0, 2.0, 3.0],
... 'b': cudf.Series(['a', 'b', 'c'])})
>>> a.take([0, 2, 2])
a b
0 1.0 a
2 3.0 c
2 3.0 c
>>> a.take([True, False, True])
a b
0 1.0 a
2 3.0 c
"""
if self._get_axis_from_axis_arg(axis) != 0:
raise NotImplementedError("Only axis=0 is supported.")
return self._gather(GatherMap(indices, len(self), nullify=False))
def _reset_index(self, level, drop, col_level=0, col_fill=""):
"""Shared path for DataFrame.reset_index and Series.reset_index."""
if level is not None and not isinstance(level, (tuple, list)):
level = (level,)
_check_duplicate_level_names(level, self._index.names)
# Split the columns in the index into data and index columns
(
data_columns,
index_columns,
data_names,
index_names,
) = self._index._split_columns_by_levels(level)
if index_columns:
index = _index_from_columns(
index_columns,
name=self._index.name,
)
if isinstance(index, MultiIndex):
index.names = index_names
else:
index.name = index_names[0]
else:
index = RangeIndex(len(self))
if drop:
return self._data, index
new_column_data = {}
for name, col in zip(data_names, data_columns):
if name == "index" and "index" in self._data:
name = "level_0"
name = (
tuple(
name if i == col_level else col_fill
for i in range(self._data.nlevels)
)
if self._data.multiindex
else name
)
new_column_data[name] = col
# This is to match pandas where the new data columns are always
# inserted to the left of existing data columns.
return (
ColumnAccessor(
{**new_column_data, **self._data},
self._data.multiindex,
self._data._level_names,
),
index,
)
def _first_or_last(
self, offset, idx: int, op: Callable, side: str, slice_func: Callable
) -> "IndexedFrame":
"""Shared code path for ``first`` and ``last``."""
if not isinstance(self._index, cudf.core.index.DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index.")
if not isinstance(offset, str):
raise NotImplementedError(
f"Unsupported offset type {type(offset)}."
)
if len(self) == 0:
return self.copy()
pd_offset = pd.tseries.frequencies.to_offset(offset)
to_search = op(
pd.Timestamp(self._index._column.element_indexing(idx)), pd_offset
)
if (
idx == 0
and not isinstance(pd_offset, pd.tseries.offsets.Tick)
and pd_offset.is_on_offset(pd.Timestamp(self._index[0]))
):
# Special handling is required when the start time of the index
# is on the end of the offset. See pandas gh29623 for detail.
to_search = to_search - pd_offset.base
return self.loc[:to_search]
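# Binary-search the sorted DatetimeIndex for the cut point and hand the
# resulting position to the caller-provided slice function.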
needle = as_column(to_search, dtype=self._index.dtype)
end_point = int(
self._index._column.searchsorted(
needle, side=side
).element_indexing(0)
)
return slice_func(end_point)
def first(self, offset):
"""Select initial periods of time series data based on a date offset.
When having a DataFrame with **sorted** dates as index, this function
can select the first few rows based on a date offset.
Parameters
----------
offset: str
The offset length of the data that will be selected. For instance,
'1M' will display all rows having their index within the first
month.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a ``DatetimeIndex``
Examples
--------
>>> i = cudf.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = cudf.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
"""
return self._first_or_last(
offset,
idx=0,
op=operator.__add__,
side="left",
slice_func=lambda i: self.iloc[:i],
)
def last(self, offset):
"""Select final periods of time series data based on a date offset.
When having a DataFrame with **sorted** dates as index, this function
can select the last few rows based on a date offset.
Parameters
----------
offset: str
The offset length of the data that will be selected. For instance,
'3D' will display all rows having their index within the last 3
days.
Returns
-------
Series or DataFrame
A subset of the caller.
Raises
------
TypeError
If the index is not a ``DatetimeIndex``
Examples
--------
>>> i = cudf.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = cudf.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
"""
return self._first_or_last(
offset,
idx=-1,
op=operator.__sub__,
side="right",
slice_func=lambda i: self.iloc[i:],
)
@_cudf_nvtx_annotate
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
ignore_index=False,
):
"""Return a random sample of items from an axis of object.
If reproducible results are required, a random number generator may be
provided via the `random_state` parameter. This function will always
produce the same sample given an identical `random_state`.
Notes
-----
When sampling from ``axis=0/'index'``, ``random_state`` can be either
a numpy random state (``numpy.random.RandomState``) or a cupy random
state (``cupy.random.RandomState``). When a numpy random state is
used, the output is guaranteed to match the output of the corresponding
pandas method call, but generating the sample may be slow. If exact
pandas equivalence is not required, using a cupy random state will
achieve better performance, especially when sampling a large number of
items. It's advised to use an `ndarray` type that matches the random
state for the `weights` array.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if frac = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with n.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
`replace == True` is not supported for axis = 1/"columns".
`replace == False` is not supported for axis = 0/"index" given
`random_state` is `None` or a cupy random state, and `weights` is
specified.
weights : ndarray-like, optional
Default `None` for uniform probability distribution over rows to
sample from. If an `ndarray` is passed, the length of `weights` should
equal the number of rows to sample from, and it will be normalized
to have a sum of 1. Unlike pandas, index alignment is not currently
performed.
random_state : int, numpy/cupy RandomState, or None, default None
If None, default cupy random state is chosen.
If int, the seed for the default cupy random state.
If RandomState, rows-to-sample are generated from the RandomState.
axis : {0 or `index`, 1 or `columns`, None}, default None
Axis to sample. Accepts axis number or name.
Default is stat axis for given data type
(0 for Series and DataFrames). Series doesn't support axis=1.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
Returns
-------
Series or DataFrame
A new object of same type as caller containing n items
randomly sampled from the caller object.
Examples
--------
>>> import cudf as cudf
>>> df = cudf.DataFrame({"a":{1, 2, 3, 4, 5}})
>>> df.sample(3)
a
1 2
3 4
0 1
>>> sr = cudf.Series([1, 2, 3, 4, 5])
>>> sr.sample(10, replace=True)
1 4
3 1
2 4
0 5
0 1
4 5
4 1
0 2
0 3
3 2
dtype: int64
>>> df = cudf.DataFrame(
... {"a": [1, 2], "b": [2, 3], "c": [3, 4], "d": [4, 5]}
... )
>>> df.sample(2, axis=1)
a c
0 1 3
1 2 4
"""
axis = 0 if axis is None else self._get_axis_from_axis_arg(axis)
size = self.shape[axis]
# Compute `n` from parameter `frac`.
if frac is None:
n = 1 if n is None else n
else:
if frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when upsampling the "
"population `frac` > 1."
)
if n is not None:
raise ValueError(
"Please enter a value for `frac` OR `n`, not both."
)
n = int(round(size * frac))
if n > 0 and size == 0:
raise ValueError(
"Cannot take a sample larger than 0 when axis is empty."
)
if isinstance(random_state, cp.random.RandomState):
lib = cp
elif isinstance(random_state, np.random.RandomState):
lib = np
else:
# Construct random state if `random_state` parameter is None or a
# seed. By default, cupy random state is used to sample rows
# and numpy is used to sample columns. This is because row data
# is stored on device, and the column objects are stored on host.
lib = cp if axis == 0 else np
random_state = lib.random.RandomState(seed=random_state)
# Normalize `weights` array.
if weights is not None:
if isinstance(weights, str):
raise NotImplementedError(
"Weights specified by string is unsupported yet."
)
if size != len(weights):
raise ValueError(
"Weights and axis to be sampled must be of same length."
)
weights = lib.asarray(weights)
weights = weights / weights.sum()
if axis == 0:
return self._sample_axis_0(
n, weights, replace, random_state, ignore_index
)
else:
if isinstance(random_state, cp.random.RandomState):
raise ValueError(
"Sampling from `axis=1`/`columns` with cupy random state"
"isn't supported."
)
return self._sample_axis_1(
n, weights, replace, random_state, ignore_index
)
def _sample_axis_0(
self,
n: int,
weights: Optional[ColumnLike],
replace: bool,
random_state: Union[np.random.RandomState, cp.random.RandomState],
ignore_index: bool,
):
try:
gather_map = GatherMap.from_column_unchecked(
cudf.core.column.as_column(
random_state.choice(
len(self), size=n, replace=replace, p=weights
)
),
len(self),
nullify=False,
)
except NotImplementedError as e:
raise NotImplementedError(
"Random sampling with cupy does not support these inputs."
) from e
return self._gather(gather_map, keep_index=not ignore_index)
def _sample_axis_1(
self,
n: int,
weights: Optional[ColumnLike],
replace: bool,
random_state: np.random.RandomState,
ignore_index: bool,
):
raise NotImplementedError(
f"Sampling from axis 1 is not implemented for {self.__class__}."
)
def _binaryop(
self,
other: Any,
op: str,
fill_value: Any = None,
can_reindex: bool = False,
*args,
**kwargs,
):
reflect, op = self._check_reflected_op(op)
(
operands,
out_index,
can_use_self_column_name,
) = self._make_operands_and_index_for_binop(
other, op, fill_value, reflect, can_reindex
)
if operands is NotImplemented:
return NotImplemented
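# Propagate this frame's column level names only when the operands allow
# reusing them for the result.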
level_names = (
self._data._level_names if can_use_self_column_name else None
)
return self._from_data(
ColumnAccessor(
type(self)._colwise_binop(operands, op),
level_names=level_names,
),
index=out_index,
)
def _make_operands_and_index_for_binop(
self,
other: Any,
fn: str,
fill_value: Any = None,
reflect: bool = False,
can_reindex: bool = False,
*args,
**kwargs,
) -> Tuple[
Union[
Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]],
NotImplementedType,
],
Optional[cudf.BaseIndex],
bool,
]:
raise NotImplementedError(
f"Binary operations are not supported for {self.__class__}"
)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
ret = super().__array_ufunc__(ufunc, method, *inputs, **kwargs)
fname = ufunc.__name__
if ret is not None:
# pandas bitwise operations return bools if indexes are misaligned.
if "bitwise" in fname:
reflect = self is not inputs[0]
other = inputs[0] if reflect else inputs[1]
if isinstance(other, self.__class__) and not self.index.equals(
other.index
):
ret = ret.astype(bool)
return ret
# Attempt to dispatch all other functions to cupy.
cupy_func = getattr(cp, fname)
if cupy_func:
if ufunc.nin == 2:
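# Binary ufuncs go through the binop machinery so that indexes are
# aligned and pandas-style broadcasting semantics are preserved.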
other = inputs[self is inputs[0]]
inputs, index, _ = self._make_operands_and_index_for_binop(
other, fname
)
else:
# This works for Index too
inputs = {
name: (col, None, False, None)
for name, col in self._data.items()
}
index = self._index
data = self._apply_cupy_ufunc_to_operands(
ufunc, cupy_func, inputs, **kwargs
)
out = tuple(self._from_data(out, index=index) for out in data)
return out[0] if ufunc.nout == 1 else out
return NotImplemented
@_cudf_nvtx_annotate
def repeat(self, repeats, axis=None):
"""Repeats elements consecutively.
Returns a new object of caller type(DataFrame/Series) where each
element of the current object is repeated consecutively a given
number of times.
Parameters
----------
repeats : int, or array of ints
The number of repetitions for each element. This should
be a non-negative integer. Repeating 0 times will return
an empty object.
Returns
-------
Series/DataFrame
A newly created object of same type as caller
with repeated elements.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3], 'b': [10, 20, 30]})
>>> df
a b
0 1 10
1 2 20
2 3 30
>>> df.repeat(3)
a b
0 1 10
0 1 10
0 1 10
1 2 20
1 2 20
1 2 20
2 3 30
2 3 30
2 3 30
Repeat on Series
>>> s = cudf.Series([0, 2])
>>> s
0 0
1 2
dtype: int64
>>> s.repeat([3, 4])
0 0
0 0
0 0
1 2
1 2
1 2
1 2
dtype: int64
>>> s.repeat(2)
0 0
0 0
1 2
1 2
dtype: int64
"""
return self._from_columns_like_self(
Frame._repeat(
[*self._index._data.columns, *self._columns], repeats, axis
),
self._column_names,
self._index_names,
)
def _append(
self, other, ignore_index=False, verify_integrity=False, sort=None
):
# Note: Do not remove this function until pandas does. This warning
# exists to match the corresponding deprecation in pandas.
warnings.warn(
"The append method is deprecated and will be removed in a future "
"version. Use cudf.concat instead.",
FutureWarning,
)
if verify_integrity not in (None, False):
raise NotImplementedError(
"verify_integrity parameter is not supported yet."
)
if is_list_like(other):
to_concat = [self, *other]
else:
to_concat = [self, other]
return cudf.concat(to_concat, ignore_index=ignore_index, sort=sort)
def astype(self, dtype, copy=False, errors="raise", **kwargs):
"""Cast the object to the given dtype.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a :class:`numpy.dtype` or Python type to cast entire DataFrame
object to the same type. Alternatively, use ``{col: dtype, ...}``,
where col is a column label and dtype is a :class:`numpy.dtype`
or Python type to cast one or more of the DataFrame's columns to
column-specific types.
copy : bool, default False
Return a deep copy when ``copy=True``. Note that ``copy=False``
is the default, so changes to the values may propagate to other
cudf objects.
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original
object.
**kwargs : extra arguments to pass on to the constructor
Returns
-------
DataFrame/Series
Examples
--------
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame({'a': [10, 20, 30], 'b': [1, 2, 3]})
>>> df
a b
0 10 1
1 20 2
2 30 3
>>> df.dtypes
a int64
b int64
dtype: object
Cast all columns to `int32`:
>>> df.astype('int32').dtypes
a int32
b int32
dtype: object
Cast `a` to `float32` using a dictionary:
>>> df.astype({'a': 'float32'}).dtypes
a float32
b int64
dtype: object
>>> df.astype({'a': 'float32'})
a b
0 10.0 1
1 20.0 2
2 30.0 3
**Series**
>>> import cudf
>>> series = cudf.Series([1, 2], dtype='int32')
>>> series
0 1
1 2
dtype: int32
>>> series.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> series.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = cudf.CategoricalDtype(categories=[2, 1], ordered=True)
>>> series.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` (the default) and changing data
on a new Series will propagate changes:
>>> s1 = cudf.Series([1, 2])
>>> s1
0 1
1 2
dtype: int64
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1
0 10
1 2
dtype: int64
"""
if errors not in ("ignore", "raise"):
raise ValueError("invalid error value specified")
try:
data = super().astype(dtype, copy, **kwargs)
except Exception as e:
if errors == "raise":
raise e
return self
return self._from_data(data, index=self._index)
@_cudf_nvtx_annotate
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame or Series
DataFrame or Series without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.reindex
Return only specified index labels of Series
Series.dropna
Return series without null values
Series.drop_duplicates
Return series with duplicate values removed
Examples
--------
**Series**
>>> s = cudf.Series([1,2,3], index=['x', 'y', 'z'])
>>> s
x 1
y 2
z 3
dtype: int64
Drop labels x and z
>>> s.drop(labels=['x', 'z'])
y 2
dtype: int64
Drop a label from the second level in MultiIndex Series.
>>> midx = cudf.MultiIndex.from_product([[0, 1, 2], ['x', 'y']])
>>> s = cudf.Series(range(6), index=midx)
>>> s
0 x 0
y 1
1 x 2
y 3
2 x 4
y 5
dtype: int64
>>> s.drop(labels='y', level=1)
0 x 0
1 x 2
2 x 4
Name: 2, dtype: int64
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame({"A": [1, 2, 3, 4],
... "B": [5, 6, 7, 8],
... "C": [10, 11, 12, 13],
... "D": [20, 30, 40, 50]})
>>> df
A B C D
0 1 5 10 20
1 2 6 11 30
2 3 7 12 40
3 4 8 13 50
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 1 20
1 2 30
2 3 40
3 4 50
>>> df.drop(columns=['B', 'C'])
A D
0 1 20
1 2 30
2 3 40
3 4 50
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 3 7 12 40
3 4 8 13 50
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = cudf.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = cudf.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
if labels is not None:
if index is not None or columns is not None:
raise ValueError(
"Cannot specify both 'labels' and 'index'/'columns'"
)
target = labels
elif index is not None:
target = index
axis = 0
elif columns is not None:
target = columns
axis = 1
else:
raise ValueError(
"Need to specify at least one of 'labels', "
"'index' or 'columns'"
)
if inplace:
out = self
else:
out = self.copy()
if axis in (1, "columns"):
target = _get_host_unique(target)
_drop_columns(out, target, errors)
elif axis in (0, "index"):
dropped = _drop_rows_by_labels(out, target, level, errors)
if columns is not None:
columns = _get_host_unique(columns)
_drop_columns(dropped, columns, errors)
out._data = dropped._data
out._index = dropped._index
if not inplace:
return out
@_cudf_nvtx_annotate
def _explode(self, explode_column: Any, ignore_index: bool):
# Helper function for `explode` in `Series` and `DataFrame`, explodes a
# specified nested column. Other columns' corresponding rows are
# duplicated. If ignore_index is set, the original index is not
# exploded and will be replaced with a `RangeIndex`.
if not is_list_dtype(self._data[explode_column].dtype):
data = self._data.copy(deep=True)
idx = None if ignore_index else self._index.copy(deep=True)
return self.__class__._from_data(data, index=idx)
column_index = self._column_names.index(explode_column)
if not ignore_index and self._index is not None:
index_offset = self._index.nlevels
else:
index_offset = 0
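# libcudf receives the index columns first (when the index is kept), so
# the position of the exploded column must be shifted by the number of
# index levels.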
exploded = libcudf.lists.explode_outer(
[
*(self._index._data.columns if not ignore_index else ()),
*self._columns,
],
column_index + index_offset,
)
# We must copy inner datatype of the exploded list column to
# maintain struct dtype key names
exploded_dtype = cast(
ListDtype, self._columns[column_index].dtype
).element_type
return self._from_columns_like_self(
exploded,
self._column_names,
self._index_names if not ignore_index else None,
override_dtypes=(
exploded_dtype if i == column_index else None
for i in range(len(self._columns))
),
)
@_cudf_nvtx_annotate
def tile(self, count):
"""Repeats the rows `count` times to form a new Frame.
Parameters
----------
self : input Table containing columns to tile.
count : Number of times to tile "rows". Must be non-negative.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame([[8, 4, 7], [5, 2, 3]])
>>> count = 2
>>> df.tile(count)
0 1 2
0 8 4 7
1 5 2 3
0 8 4 7
1 5 2 3
Returns
-------
The indexed frame containing the tiled "rows".
"""
return self._from_columns_like_self(
libcudf.reshape.tile(
[*self._index._columns, *self._columns], count
),
column_names=self._column_names,
index_names=self._index_names,
)
@_cudf_nvtx_annotate
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=no_default,
group_keys=False,
squeeze=False,
observed=True,
dropna=True,
):
if sort is no_default:
sort = cudf.get_option("mode.pandas_compatible")
if axis not in (0, "index"):
raise NotImplementedError("axis parameter is not yet implemented")
if squeeze is not False:
raise NotImplementedError(
"squeeze parameter is not yet implemented"
)
if not observed:
raise NotImplementedError(
"observed parameter is not yet implemented"
)
if by is None and level is None:
raise TypeError(
"groupby() requires either by or level to be specified."
)
if group_keys is None:
group_keys = False
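# A cudf.Grouper with a frequency requests time-based resampling;
# anything else is an ordinary groupby.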
return (
self.__class__._resampler(self, by=by)
if isinstance(by, cudf.Grouper) and by.freq
else self.__class__._groupby(
self,
by=by,
level=level,
as_index=as_index,
dropna=dropna,
sort=sort,
group_keys=group_keys,
)
)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Addition",
op_name="add",
equivalent_op="frame + other",
df_op_example=textwrap.dedent(
"""
>>> df.add(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
""",
),
ser_op_example=textwrap.dedent(
"""
>>> a.add(b)
a 2
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.add(b, fill_value=0)
a 2
b 1
c 1
d 1
e <NA>
dtype: int64
"""
),
)
)
def add(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__add__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Addition",
op_name="radd",
equivalent_op="other + frame",
df_op_example=textwrap.dedent(
"""
>>> df.radd(1)
angles degrees
circle 1 361
triangle 4 181
rectangle 5 361
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.radd(b)
a 2
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.radd(b, fill_value=0)
a 2
b 1
c 1
d 1
e <NA>
dtype: int64
"""
),
)
)
def radd(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__radd__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Subtraction",
op_name="sub",
equivalent_op="frame - other",
df_op_example=textwrap.dedent(
"""
>>> df.sub(1)
angles degrees
circle -1 359
triangle 2 179
rectangle 3 359
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.sub(b)
a 0
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.sub(b, fill_value=0)
a 2
b 1
c 1
d -1
e <NA>
dtype: int64
"""
),
)
)
def subtract(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__sub__", fill_value)
sub = subtract
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Subtraction",
op_name="rsub",
equivalent_op="other - frame",
df_op_example=textwrap.dedent(
"""
>>> df.rsub(1)
angles degrees
circle 1 -359
triangle -2 -179
rectangle -3 -359
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.rsub(b)
a 0
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.rsub(b, fill_value=0)
a 0
b -1
c -1
d 1
e <NA>
dtype: int64
"""
),
)
)
def rsub(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__rsub__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Multiplication",
op_name="mul",
equivalent_op="frame * other",
df_op_example=textwrap.dedent(
"""
>>> df.multiply(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.multiply(b)
a 1
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.multiply(b, fill_value=0)
a 1
b 0
c 0
d 0
e <NA>
dtype: int64
"""
),
)
)
def multiply(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__mul__", fill_value)
mul = multiply
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Multiplication",
op_name="rmul",
equivalent_op="other * frame",
df_op_example=textwrap.dedent(
"""
>>> df.rmul(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.rmul(b)
a 1
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.rmul(b, fill_value=0)
a 1
b 0
c 0
d 0
e <NA>
dtype: int64
"""
),
)
)
def rmul(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__rmul__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Modulo",
op_name="mod",
equivalent_op="frame % other",
df_op_example=textwrap.dedent(
"""
>>> df.mod(1)
angles degrees
circle 0 0
triangle 0 0
rectangle 0 0
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.mod(b)
a 0
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.mod(b, fill_value=0)
a 0
b 4294967295
c 4294967295
d 0
e <NA>
dtype: int64
"""
),
)
)
def mod(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__mod__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Modulo",
op_name="rmod",
equivalent_op="other % frame",
df_op_example=textwrap.dedent(
"""
>>> df.rmod(1)
angles degrees
circle 4294967295 1
triangle 1 1
rectangle 1 1
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.rmod(b)
a 0
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.rmod(b, fill_value=0)
a 0
b 0
c 0
d 4294967295
e <NA>
dtype: int64
"""
),
)
)
def rmod(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__rmod__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Exponential",
op_name="pow",
equivalent_op="frame ** other",
df_op_example=textwrap.dedent(
"""
>>> df.pow(1)
angles degrees
circle 0 360
triangle 2 180
rectangle 4 360
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.pow(b)
a 1
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.pow(b, fill_value=0)
a 1
b 1
c 1
d 0
e <NA>
dtype: int64
"""
),
)
)
def pow(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__pow__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Exponential",
op_name="rpow",
equivalent_op="other ** frame",
df_op_example=textwrap.dedent(
"""
>>> df.rpow(1)
angles degrees
circle 1 1
triangle 1 1
rectangle 1 1
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.rpow(b)
a 1
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.rpow(b, fill_value=0)
a 1
b 0
c 0
d 1
e <NA>
dtype: int64
"""
),
)
)
def rpow(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__rpow__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Integer division",
op_name="floordiv",
equivalent_op="frame // other",
df_op_example=textwrap.dedent(
"""
>>> df.floordiv(1)
angles degrees
circle 0 360
triangle 3 180
rectangle 4 360
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.floordiv(b)
a 1
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.floordiv(b, fill_value=0)
a 1
b 9223372036854775807
c 9223372036854775807
d 0
e <NA>
dtype: int64
"""
),
)
)
def floordiv(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__floordiv__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Integer division",
op_name="rfloordiv",
equivalent_op="other // frame",
df_op_example=textwrap.dedent(
"""
>>> df.rfloordiv(1)
angles degrees
circle 9223372036854775807 0
triangle 0 0
rectangle 0 0
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.rfloordiv(b)
a 1
b <NA>
c <NA>
d <NA>
e <NA>
dtype: int64
>>> a.rfloordiv(b, fill_value=0)
a 1
b 0
c 0
d 9223372036854775807
e <NA>
dtype: int64
"""
),
)
)
def rfloordiv(
self, other, axis, level=None, fill_value=None
): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__rfloordiv__", fill_value)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Floating division",
op_name="truediv",
equivalent_op="frame / other",
df_op_example=textwrap.dedent(
"""
>>> df.truediv(1)
angles degrees
circle 0.0 360.0
triangle 3.0 180.0
rectangle 4.0 360.0
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.truediv(b)
a 1.0
b <NA>
c <NA>
d <NA>
e <NA>
dtype: float64
>>> a.truediv(b, fill_value=0)
a 1.0
b Inf
c Inf
d 0.0
e <NA>
dtype: float64
"""
),
)
)
def truediv(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__truediv__", fill_value)
# Alias for truediv
div = truediv
divide = truediv
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Floating division",
op_name="rtruediv",
equivalent_op="other / frame",
df_op_example=textwrap.dedent(
"""
>>> df.rtruediv(1)
angles degrees
circle inf 0.002778
triangle 0.333333 0.005556
rectangle 0.250000 0.002778
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.rtruediv(b)
a 1.0
b <NA>
c <NA>
d <NA>
e <NA>
dtype: float64
>>> a.rtruediv(b, fill_value=0)
a 1.0
b 0.0
c 0.0
d Inf
e <NA>
dtype: float64
"""
),
)
)
def rtruediv(self, other, axis, level=None, fill_value=None): # noqa: D102
if level is not None:
raise NotImplementedError("level parameter is not supported yet.")
return self._binaryop(other, "__rtruediv__", fill_value)
# Alias for rtruediv
rdiv = rtruediv
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Equal to",
op_name="eq",
equivalent_op="frame == other",
df_op_example=textwrap.dedent(
"""
>>> df.eq(1)
angles degrees
circle False False
triangle False False
rectangle False False
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.eq(b)
a True
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.eq(b, fill_value=0)
a True
b False
c False
d False
e <NA>
dtype: bool
"""
),
)
)
def eq(
self, other, axis="columns", level=None, fill_value=None
): # noqa: D102
return self._binaryop(
other=other, op="__eq__", fill_value=fill_value, can_reindex=True
)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Not equal to",
op_name="ne",
equivalent_op="frame != other",
df_op_example=textwrap.dedent(
"""
>>> df.ne(1)
angles degrees
circle True True
triangle True True
rectangle True True
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.ne(b)
a False
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.ne(b, fill_value=0)
a False
b True
c True
d True
e <NA>
dtype: bool
"""
),
)
)
def ne(
self, other, axis="columns", level=None, fill_value=None
): # noqa: D102
return self._binaryop(
other=other, op="__ne__", fill_value=fill_value, can_reindex=True
)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Less than",
op_name="lt",
equivalent_op="frame < other",
df_op_example=textwrap.dedent(
"""
>>> df.lt(1)
angles degrees
circle True False
triangle False False
rectangle False False
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.lt(b)
a False
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.lt(b, fill_value=0)
a False
b False
c False
d True
e <NA>
dtype: bool
"""
),
)
)
def lt(
self, other, axis="columns", level=None, fill_value=None
): # noqa: D102
return self._binaryop(
other=other, op="__lt__", fill_value=fill_value, can_reindex=True
)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Less than or equal to",
op_name="le",
equivalent_op="frame <= other",
df_op_example=textwrap.dedent(
"""
>>> df.le(1)
angles degrees
circle True False
triangle False False
rectangle False False
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.le(b)
a True
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.le(b, fill_value=0)
a True
b False
c False
d True
e <NA>
dtype: bool
"""
),
)
)
def le(
self, other, axis="columns", level=None, fill_value=None
): # noqa: D102
return self._binaryop(
other=other, op="__le__", fill_value=fill_value, can_reindex=True
)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Greater than",
op_name="gt",
equivalent_op="frame > other",
df_op_example=textwrap.dedent(
"""
>>> df.gt(1)
angles degrees
circle False True
triangle True True
rectangle True True
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.gt(b)
a False
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.gt(b, fill_value=0)
a False
b True
c True
d False
e <NA>
dtype: bool
"""
),
)
)
def gt(
self, other, axis="columns", level=None, fill_value=None
): # noqa: D102
return self._binaryop(
other=other, op="__gt__", fill_value=fill_value, can_reindex=True
)
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_binop_template.format(
operation="Greater than or equal to",
op_name="ge",
equivalent_op="frame >= other",
df_op_example=textwrap.dedent(
"""
>>> df.ge(1)
angles degrees
circle False True
triangle True True
rectangle True True
"""
),
ser_op_example=textwrap.dedent(
"""
>>> a.ge(b)
a True
b <NA>
c <NA>
d <NA>
e <NA>
dtype: bool
>>> a.ge(b, fill_value=0)
a True
b True
c True
d False
e <NA>
dtype: bool
"""
),
)
)
def ge(
self, other, axis="columns", level=None, fill_value=None
): # noqa: D102
return self._binaryop(
other=other, op="__ge__", fill_value=fill_value, can_reindex=True
)
def _preprocess_subset(self, subset):
if subset is None:
subset = self._column_names
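# `and` binds tighter than `or`: a tuple is treated as a single label
# only when it names an existing column, otherwise it is iterated as a
# collection of labels.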
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self._data.names
):
subset = (subset,)
diff = set(subset) - set(self._data)
if len(diff) != 0:
raise KeyError(f"columns {diff} do not exist")
return subset
@_cudf_nvtx_annotate
def rank(
self,
axis=0,
method="average",
numeric_only=None,
na_option="keep",
ascending=True,
pct=False,
):
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value
(i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups.
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending.
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
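Examples
--------
Ranking a Series with tied values using the default 'average' method:
>>> s = cudf.Series([1, 2, 2, 3])
>>> s.rank()
0    1.0
1    2.5
2    2.5
3    4.0
dtype: float64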
"""
if method not in {"average", "min", "max", "first", "dense"}:
raise KeyError(method)
method_enum = libcudf.aggregation.RankMethod[method.upper()]
if na_option not in {"keep", "top", "bottom"}:
raise ValueError(
"na_option must be one of 'keep', 'top', or 'bottom'"
)
if axis not in (0, "index"):
raise NotImplementedError(
f"axis must be `0`/`index`, "
f"axis={axis} is not yet supported in rank"
)
source = self
if numeric_only:
numeric_cols = (
name
for name in self._data.names
if _is_non_decimal_numeric_dtype(self._data[name])
)
source = self._get_columns_by_label(numeric_cols)
if source.empty:
return source.astype("float64")
result_columns = libcudf.sort.rank_columns(
[*source._columns], method_enum, na_option, ascending, pct
)
return self.__class__._from_data(
dict(zip(source._column_names, result_columns)),
index=source._index,
).astype(np.float64)
def convert_dtypes(
self,
infer_objects=True,
convert_string=True,
convert_integer=True,
convert_boolean=True,
convert_floating=True,
dtype_backend=None,
):
"""
Convert columns to the best possible nullable dtypes.
If the dtype is numeric, and consists of all integers, convert
to an appropriate integer extension type. Otherwise, convert
to an appropriate floating type.
All other dtypes are always returned as-is, since all dtypes in
cudf are nullable.
"""
result = self.copy()
if convert_floating:
# cast any floating columns to int64 if
# they are all integer data:
for name, col in result._data.items():
if col.dtype.kind == "f":
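# Fill nulls with 0 so the element-wise comparison below can run; float
# columns whose values are all integral are then downcast to int64.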
col = col.fillna(0)
if cp.allclose(col, col.astype("int64")):
result._data[name] = col.astype("int64")
return result
@_warn_no_dask_cudf
def __dask_tokenize__(self):
return [
type(self),
self._dtypes,
self.index,
self.hash_values().values_host,
]
def _check_duplicate_level_names(specified, level_names):
"""Raise if any of `specified` has duplicates in `level_names`."""
if specified is None:
return
if len(set(level_names)) == len(level_names):
return
duplicates = {key for key, val in Counter(level_names).items() if val > 1}
duplicates_specified = [spec for spec in specified if spec in duplicates]
if duplicates_specified:
# Note: pandas raises first encountered duplicates, cuDF raises all.
raise ValueError(
f"The names {duplicates_specified} occurs multiple times, use a"
" level number"
)
@_cudf_nvtx_annotate
def _get_replacement_values_for_columns(
to_replace: Any, value: Any, columns_dtype_map: Dict[Any, Any]
) -> Tuple[Dict[Any, bool], Dict[Any, Any], Dict[Any, Any]]:
"""
Returns a per column mapping for the values to be replaced, new
values to be replaced with and if all the values are empty.
Parameters
----------
to_replace : numeric, str, list-like or dict
Contains the values to be replaced.
value : numeric, str, list-like, or dict
Contains the values to replace `to_replace` with.
columns_dtype_map : dict
A column to dtype mapping representing dtype of columns.
Returns
-------
all_na_columns : dict
A dict mapping each column to whether its replacement values are
all NA.
to_replace_columns : dict
A dict mapping each column to the existing values that have to be
replaced.
values_columns : dict
A dict mapping each column to the corresponding replacement values.
"""
to_replace_columns: Dict[Any, Any] = {}
values_columns: Dict[Any, Any] = {}
all_na_columns: Dict[Any, Any] = {}
if is_scalar(to_replace) and is_scalar(value):
to_replace_columns = {col: [to_replace] for col in columns_dtype_map}
values_columns = {col: [value] for col in columns_dtype_map}
elif cudf.api.types.is_list_like(to_replace) or isinstance(
to_replace, ColumnBase
):
if is_scalar(value):
to_replace_columns = {col: to_replace for col in columns_dtype_map}
values_columns = {
col: [value]
if _is_non_decimal_numeric_dtype(columns_dtype_map[col])
else full(
len(to_replace),
value,
cudf.dtype(type(value)),
)
for col in columns_dtype_map
}
elif cudf.api.types.is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
f"Replacement lists must be "
f"of same length."
f" Expected {len(to_replace)}, got {len(value)}."
)
else:
to_replace_columns = {
col: to_replace for col in columns_dtype_map
}
values_columns = {col: value for col in columns_dtype_map}
elif cudf.utils.dtypes.is_column_like(value):
to_replace_columns = {col: to_replace for col in columns_dtype_map}
values_columns = {col: value for col in columns_dtype_map}
else:
raise TypeError(
"value argument must be scalar, list-like or Series"
)
elif _is_series(to_replace):
if value is None:
to_replace_columns = {
col: as_column(to_replace.index) for col in columns_dtype_map
}
values_columns = {col: to_replace for col in columns_dtype_map}
elif is_dict_like(value):
to_replace_columns = {
col: to_replace[col]
for col in columns_dtype_map
if col in to_replace
}
values_columns = {
col: value[col] for col in to_replace_columns if col in value
}
elif is_scalar(value) or _is_series(value):
to_replace_columns = {
col: to_replace[col]
for col in columns_dtype_map
if col in to_replace
}
values_columns = {
col: [value] if is_scalar(value) else value[col]
for col in to_replace_columns
if col in value
}
else:
raise ValueError(
"Series.replace cannot use dict-like to_replace and non-None "
"value"
)
elif is_dict_like(to_replace):
if value is None:
to_replace_columns = {
col: list(to_replace.keys()) for col in columns_dtype_map
}
values_columns = {
col: list(to_replace.values()) for col in columns_dtype_map
}
elif is_dict_like(value):
to_replace_columns = {
col: to_replace[col]
for col in columns_dtype_map
if col in to_replace
}
values_columns = {
col: value[col] for col in columns_dtype_map if col in value
}
elif is_scalar(value) or _is_series(value):
to_replace_columns = {
col: to_replace[col]
for col in columns_dtype_map
if col in to_replace
}
values_columns = {
col: [value] if is_scalar(value) else value
for col in columns_dtype_map
if col in to_replace
}
else:
raise TypeError("value argument must be scalar, dict, or Series")
else:
raise TypeError(
"Expecting 'to_replace' to be either a scalar, array-like, "
"dict or None, got invalid type "
f"'{type(to_replace).__name__}'"
)
to_replace_columns = {
key: [value] if is_scalar(value) else value
for key, value in to_replace_columns.items()
}
values_columns = {
key: [value] if is_scalar(value) else value
for key, value in values_columns.items()
}
for i in to_replace_columns:
if i in values_columns:
if isinstance(values_columns[i], list):
all_na = values_columns[i].count(None) == len(
values_columns[i]
)
else:
all_na = False
all_na_columns[i] = all_na
return all_na_columns, to_replace_columns, values_columns
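# Illustrative sketch (hypothetical inputs): for a scalar-to-scalar replace
# over two int64 columns, every column maps to the same single-element lists
# and no column is flagged as all-NA.
# >>> all_na, to_rep, vals = _get_replacement_values_for_columns(
# ...     to_replace=1, value=-1,
# ...     columns_dtype_map={"x": np.dtype("int64"), "y": np.dtype("int64")},
# ... )
# >>> to_rep
# {'x': [1], 'y': [1]}
# >>> vals
# {'x': [-1], 'y': [-1]}
# >>> all_na
# {'x': False, 'y': False}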
def _is_series(obj):
"""
Checks whether `obj` behaves like a `cudf.Series` (a one-dimensional
Frame with an index) without using isinstance(obj, cudf.Series).
"""
return isinstance(obj, Frame) and obj.ndim == 1 and obj._index is not None
@_cudf_nvtx_annotate
def _drop_rows_by_labels(
obj: DataFrameOrSeries,
labels: Union[ColumnLike, abc.Iterable, str],
level: Union[int, str],
errors: str,
) -> DataFrameOrSeries:
"""Remove rows specified by `labels`.
If `errors="raise"`, an error is raised if some items in `labels` do not
exist in `obj._index`.
Will raise if `level` (when an integer) is greater than or equal to the
number of index levels.
"""
if isinstance(level, int) and level >= obj.index.nlevels:
raise ValueError("Param level out of bounds.")
if not isinstance(labels, cudf.core.single_column_frame.SingleColumnFrame):
labels = as_column(labels)
if isinstance(obj.index, cudf.MultiIndex):
if level is None:
level = 0
levels_index = obj.index.get_level_values(level)
if errors == "raise" and not labels.isin(levels_index).all():
raise KeyError("One or more values not found in axis")
if isinstance(level, int):
ilevel = level
else:
ilevel = obj._index.names.index(level)
# 1. Merge Index df and data df along column axis:
# | id | ._index df | data column(s) |
idx_nlv = obj._index.nlevels
working_df = obj._index.to_frame(index=False)
working_df.columns = list(range(idx_nlv))
for i, col in enumerate(obj._data):
working_df[idx_nlv + i] = obj._data[col]
# 2. Set `level` as common index:
# | level | ._index df w/o level | data column(s) |
working_df = working_df.set_index(level)
# 3. Use "leftanti" join to drop
# TODO: use internal API with "leftanti" and specify left and right
# join keys to bypass logic check
to_join = cudf.DataFrame(index=cudf.Index(labels, name=level))
join_res = working_df.join(to_join, how="leftanti")
# 4. Reconstruct original layout, and rename
join_res._insert(
ilevel, name=join_res._index.name, value=join_res._index
)
midx = cudf.MultiIndex.from_frame(
join_res.iloc[:, 0:idx_nlv], names=obj._index.names
)
if isinstance(obj, cudf.Series):
return obj.__class__._from_data(
join_res.iloc[:, idx_nlv:]._data, index=midx, name=obj.name
)
else:
return obj.__class__._from_data(
join_res.iloc[:, idx_nlv:]._data,
index=midx,
columns=obj._data.to_pandas_index(),
)
else:
if errors == "raise" and not labels.isin(obj.index).all():
raise KeyError("One or more values not found in axis")
key_df = cudf.DataFrame._from_data(
data={},
index=cudf.Index(
labels, name=getattr(labels, "name", obj.index.name)
),
)
if isinstance(obj, cudf.DataFrame):
res = obj.join(key_df, how="leftanti")
else:
res = obj.to_frame(name="tmp").join(key_df, how="leftanti")["tmp"]
res.name = obj.name
# The join changes the index to a common type, but we need to
# preserve the type of the index being returned, hence this cast.
res._index = res.index.astype(obj.index.dtype)
return res
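# Illustrative sketch (hypothetical data): this helper backs row-wise
# ``drop``, e.g.
# >>> import cudf
# >>> s = cudf.Series([10, 20, 30], index=["a", "b", "c"])
# >>> s.drop(["b"])
# a    10
# c    30
# dtype: int64
# With ``errors="raise"`` (the default), dropping a label that is not
# present raises a KeyError instead of silently returning the input.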
def _is_same_dtype(lhs_dtype, rhs_dtype):
# Utility specific to `_reindex` to check
# for matching column dtype.
if lhs_dtype == rhs_dtype:
return True
elif (
is_categorical_dtype(lhs_dtype)
and is_categorical_dtype(rhs_dtype)
and lhs_dtype.categories.dtype == rhs_dtype.categories.dtype
):
# OK if categories are not all the same
return True
elif (
is_categorical_dtype(lhs_dtype)
and not is_categorical_dtype(rhs_dtype)
and lhs_dtype.categories.dtype == rhs_dtype
):
return True
elif (
is_categorical_dtype(rhs_dtype)
and not is_categorical_dtype(lhs_dtype)
and rhs_dtype.categories.dtype == lhs_dtype
):
return True
else:
return False
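# Illustrative sketch (hypothetical dtypes): a categorical dtype is treated
# as matching its underlying categories' dtype, so reindexing avoids an
# unnecessary cast.
# >>> import numpy as np
# >>> cat = cudf.CategoricalDtype(categories=[1, 2, 3])
# >>> _is_same_dtype(cat, np.dtype("int64"))
# True
# >>> _is_same_dtype(np.dtype("int64"), np.dtype("float64"))
# False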
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/resample.py
|
# SPDX-FileCopyrightText: Copyright (c) 2021-2023, NVIDIA CORPORATION &
# AFFILIATES. All rights reserved. SPDX-License-Identifier:
# Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import numpy as np
import pandas as pd
import cudf
import cudf._lib.labeling
import cudf.core.index
from cudf._typing import DataFrameOrSeries
from cudf.core.groupby.groupby import (
DataFrameGroupBy,
GroupBy,
SeriesGroupBy,
_Grouping,
)
from cudf.core.tools.datetimes import _offset_alias_to_code, _unit_dtype_map
class _Resampler(GroupBy):
grouping: "_ResampleGrouping"
def __init__(self, obj, by, axis=None, kind=None):
by = _ResampleGrouping(obj, by)
super().__init__(obj, by=by)
def agg(self, func):
result = super().agg(func)
if len(self.grouping.bin_labels) != len(result):
index = cudf.core.index.Index(
self.grouping.bin_labels, name=self.grouping.names[0]
)
return result._align_to_index(
index, how="right", sort=False, allow_non_unique=True
)
else:
return result.sort_index()
def asfreq(self):
return self.obj._align_to_index(
self.grouping.bin_labels,
how="right",
sort=False,
allow_non_unique=True,
)
def _scan_fill(self, method: str, limit: int) -> DataFrameOrSeries:
# TODO: can this be more efficient?
# first, compute the outer join between `self.obj` and the `bin_labels`
# to get the sampling "gaps":
upsampled = self.obj._align_to_index(
self.grouping.bin_labels,
how="outer",
sort=True,
allow_non_unique=True,
)
# fill the gaps:
filled = upsampled.fillna(method=method)
# filter the result to only include the values corresponding
# to the bin labels:
return filled._align_to_index(
self.grouping.bin_labels,
how="right",
sort=False,
allow_non_unique=True,
)
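# Illustrative sketch (hypothetical data): upsampling with a fill method is
# expected to go through ``_scan_fill`` -- missing bins are forward-filled
# and the result is then trimmed back to the bin labels.
# >>> import cudf
# >>> ts = cudf.Series(
# ...     [1, 2],
# ...     index=cudf.date_range("2001-01-01", periods=2, freq="2s"),
# ... )
# >>> ts.resample("1s").ffill()
# 2001-01-01 00:00:00    1
# 2001-01-01 00:00:01    1
# 2001-01-01 00:00:02    2
# dtype: int64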
def serialize(self):
header, frames = super().serialize()
grouping_head, grouping_frames = self.grouping.serialize()
header["grouping"] = grouping_head
header["resampler_type"] = pickle.dumps(type(self))
header["grouping_frames_count"] = len(grouping_frames)
frames.extend(grouping_frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
obj_type = pickle.loads(header["obj_type"])
obj = obj_type.deserialize(
header["obj"], frames[: header["num_obj_frames"]]
)
grouping = _ResampleGrouping.deserialize(
header["grouping"], frames[header["num_obj_frames"] :]
)
resampler_cls = pickle.loads(header["resampler_type"])
out = resampler_cls.__new__(resampler_cls)
out.grouping = grouping
super().__init__(out, obj, by=grouping)
return out
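# Illustrative sketch (hedged): ``serialize``/``deserialize`` are intended to
# form a round trip that preserves the resampler's grouping, e.g.
# >>> header, frames = resampler.serialize()
# >>> restored = type(resampler).deserialize(header, frames)
# where ``resampler`` is an existing DataFrameResampler or SeriesResampler.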
class DataFrameResampler(_Resampler, DataFrameGroupBy):
pass
class SeriesResampler(_Resampler, SeriesGroupBy):
pass
class _ResampleGrouping(_Grouping):
bin_labels: cudf.core.index.Index
def copy(self, deep=True):
out = super().copy(deep=deep)
result = _ResampleGrouping.__new__(_ResampleGrouping)
result.names = out.names
result._named_columns = out._named_columns
result._key_columns = out._key_columns
result.bin_labels = self.bin_labels.copy(deep=deep)
return result
def serialize(self):
header, frames = super().serialize()
labels_head, labels_frames = self.bin_labels.serialize()
header["__bin_labels"] = labels_head
header["__bin_labels_count"] = len(labels_frames)
frames.extend(labels_frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
names = pickle.loads(header["names"])
_named_columns = pickle.loads(header["_named_columns"])
key_columns = cudf.core.column.deserialize_columns(
header["columns"], frames[: -header["__bin_labels_count"]]
)
out = _ResampleGrouping.__new__(_ResampleGrouping)
out.names = names
out._named_columns = _named_columns
out._key_columns = key_columns
out.bin_labels = cudf.core.index.Index.deserialize(
header["__bin_labels"], frames[-header["__bin_labels_count"] :]
)
return out
def _handle_frequency_grouper(self, by):
# if `by` is a time frequency grouper, we bin the key column
# using bin intervals specified by `by.freq`, then use *that*
# as the groupby key
freq = by.freq
label = by.label
closed = by.closed
if isinstance(freq, (cudf.DateOffset, pd.DateOffset)):
raise NotImplementedError(
"Resampling by DateOffset objects is not yet supported."
)
if not isinstance(freq, str):
raise TypeError(
f"Unsupported type for freq: {type(freq).__name__}"
)
# convert freq to a pd.DateOffset:
offset = pd.tseries.frequencies.to_offset(freq)
if offset.freqstr == "M" or offset.freqstr.startswith("W-"):
label = "right" if label is None else label
closed = "right" if closed is None else closed
else:
label = "left" if label is None else label
closed = "left" if closed is None else closed
# determine the key column
if by.key is None and by.level is None:
# then assume that the key is the index of `self._obj`:
self._handle_index(self._obj.index)
elif by.key:
self._handle_label(by.key)
elif by.level:
self._handle_level(by.level)
if not len(self._key_columns) == 1:
raise ValueError("Must resample on exactly one column")
key_column = self._key_columns[0]
if not isinstance(key_column, cudf.core.column.DatetimeColumn):
raise TypeError(
f"Can only resample on a DatetimeIndex or datetime column, "
f"got column of type {key_column.dtype}"
)
# get the start and end values that will be used to generate
# the bin labels
min_date, max_date = key_column._minmax()
start, end = _get_timestamp_range_edges(
pd.Timestamp(min_date.value),
pd.Timestamp(max_date.value),
offset,
closed=closed,
)
# in some cases, an extra time stamp is required in order to
# bin all the values. It's OK if we generate more labels than
# we need, as we remove any unused labels below
end += offset
# generate the labels for binning the key column:
bin_labels = cudf.date_range(
start=start,
end=end,
freq=freq,
)
# We want the (resampled) column of timestamps in the result
# to have a resolution closest to the resampling
# frequency. For example, if resampling from '1T' to '1s', we
# want the resulting timestamp column to be of dtype
# 'datetime64[s]'. libcudf requires the bin labels and key
# column to have the same dtype, so we compute a `result_type`
# and cast them both to that type.
try:
result_type = np.dtype(
_unit_dtype_map[_offset_alias_to_code[offset.name]]
)
except KeyError:
# unsupported resolution (we don't support resolutions >s)
# fall back to using datetime64[s]
result_type = np.dtype("datetime64[s]")
# TODO: Ideally, we can avoid one cast by having `date_range`
# generate timestamps of a given dtype. Currently, it can
# only generate timestamps with 'ns' precision
key_column = key_column.astype(result_type)
bin_labels = bin_labels.astype(result_type)
# bin the key column:
bin_numbers = cudf._lib.labeling.label_bins(
key_column,
left_edges=bin_labels[:-1]._column,
left_inclusive=(closed == "left"),
right_edges=bin_labels[1:]._column,
right_inclusive=(closed == "right"),
)
if label == "right":
bin_labels = bin_labels[1:]
else:
bin_labels = bin_labels[:-1]
# if we have more labels than bins, remove the extra labels:
nbins = bin_numbers.max() + 1
if len(bin_labels) > nbins:
bin_labels = bin_labels[:nbins]
bin_labels.name = self.names[0]
self.bin_labels = bin_labels
# replace self._key_columns with the binned key column:
self._key_columns = [
bin_labels._gather(bin_numbers, check_bounds=False)._column.astype(
result_type
)
]
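# Illustrative sketch (hypothetical data) of the binning performed above:
# resampling a 1-second series into 3-second bins yields left-closed,
# left-labelled bins by default.
# >>> import cudf
# >>> ts = cudf.Series(
# ...     range(6),
# ...     index=cudf.date_range("2001-01-01", periods=6, freq="1s"),
# ... )
# >>> ts.resample("3s").sum()
# 2001-01-01 00:00:00     3
# 2001-01-01 00:00:03    12
# dtype: int64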
# NOTE: this function is vendored from Pandas
def _get_timestamp_range_edges(
first, last, freq, closed="left", origin="start_day", offset=None
):
"""
Adjust the `first` Timestamp to the preceding Timestamp that resides on
the provided offset. Adjust the `last` Timestamp to the following
Timestamp that resides on the provided offset. Input Timestamps that
already reside on the offset will be adjusted depending on the type of
offset and the `closed` parameter.
Parameters
----------
first : pd.Timestamp
The beginning Timestamp of the range to be adjusted.
last : pd.Timestamp
The ending Timestamp of the range to be adjusted.
freq : pd.DateOffset
The dateoffset to which the Timestamps will be adjusted.
closed : {'right', 'left'}, default 'left'
Which side of bin interval is closed.
origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day'
The timestamp on which to adjust the grouping. The timezone of origin
must match the timezone of the index. If a timestamp is not used,
these values are also supported:
- 'epoch': `origin` is 1970-01-01
- 'start': `origin` is the first value of the timeseries
- 'start_day': `origin` is the first day at midnight of the timeseries
offset : pd.Timedelta, default is None
An offset timedelta added to the origin.
Returns
-------
A tuple of length 2, containing the adjusted pd.Timestamp objects.
"""
from pandas.tseries.offsets import Day, Tick
if isinstance(freq, Tick):
index_tz = first.tz
if isinstance(origin, pd.Timestamp) and (origin.tz is None) != (
index_tz is None
):
raise ValueError(
"The origin must have the same timezone as the index."
)
elif origin == "epoch":
# set the epoch based on the timezone to have similar bins results
# when resampling on the same kind of indexes on different
# timezones
origin = pd.Timestamp("1970-01-01", tz=index_tz)
if isinstance(freq, Day):
# _adjust_dates_anchored assumes 'D' means 24H, but first/last
# might contain a DST transition (23H, 24H, or 25H).
# So "pretend" the dates are naive when adjusting the endpoints
first = first.tz_localize(None)
last = last.tz_localize(None)
if isinstance(origin, pd.Timestamp):
origin = origin.tz_localize(None)
first, last = _adjust_dates_anchored(
first, last, freq, closed=closed, origin=origin, offset=offset
)
if isinstance(freq, Day):
first = first.tz_localize(index_tz)
last = last.tz_localize(index_tz)
else:
first = first.normalize()
last = last.normalize()
if closed == "left":
first = pd.Timestamp(freq.rollback(first))
else:
first = pd.Timestamp(first - freq)
last = pd.Timestamp(last + freq)
return first, last
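# Illustrative worked example (hedged -- values follow the logic above, not a
# recorded run): for a daily Tick frequency the edges snap to the surrounding
# midnights.
# >>> import pandas as pd
# >>> _get_timestamp_range_edges(
# ...     pd.Timestamp("2000-01-01 06:00"),
# ...     pd.Timestamp("2000-01-02 18:00"),
# ...     pd.tseries.frequencies.to_offset("1D"),
# ... )
# (Timestamp('2000-01-01 00:00:00'), Timestamp('2000-01-03 00:00:00'))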
# NOTE: this function is vendored from Pandas
def _adjust_dates_anchored(
first, last, freq, closed="right", origin="start_day", offset=None
):
# First and last offsets should be calculated from the start day to fix an
# error caused by resampling across multiple days when a one-day period is
# not a multiple of the frequency. See GH 8683.
# To handle frequencies that are not multiples of, or divisible by, a day,
# we allow a fixed origin timestamp to be specified. See GH 31809.
origin_nanos = 0 # origin == "epoch"
if origin == "start_day":
origin_nanos = first.normalize().value
elif origin == "start":
origin_nanos = first.value
elif isinstance(origin, pd.Timestamp):
origin_nanos = origin.value
origin_nanos += offset.value if offset else 0
# GH 10117 & GH 19375. If first and last contain timezone information,
# Perform the calculation in UTC in order to avoid localizing on an
# Ambiguous or Nonexistent time.
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
if first_tzinfo is not None:
first = first.tz_convert("UTC")
if last_tzinfo is not None:
last = last.tz_convert("UTC")
foffset = (first.value - origin_nanos) % freq.nanos
loffset = (last.value - origin_nanos) % freq.nanos
if closed == "right":
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - freq.nanos
if loffset > 0:
# roll forward
lresult = last.value + (freq.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (freq.nanos - loffset)
else:
lresult = last.value + freq.nanos
fresult = pd.Timestamp(fresult)
lresult = pd.Timestamp(lresult)
if first_tzinfo is not None:
fresult = fresult.tz_localize("UTC").tz_convert(first_tzinfo)
if last_tzinfo is not None:
lresult = lresult.tz_localize("UTC").tz_convert(last_tzinfo)
return fresult, lresult
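# Worked example (hedged, follows the arithmetic above): with
# first=2000-01-01 06:00, last=2000-01-01 20:00, freq=4H and
# origin="start_day", origin_nanos is midnight, so foffset = 6h % 4h = 2h and
# loffset = 20h % 4h = 0h. For closed="left" this gives
# fresult = 04:00 (first rolled back to the previous 4-hour boundary) and
# lresult = 20:00 + 4h = 2000-01-02 00:00 (one full bin past the end).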
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/_base_index.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
from __future__ import annotations
import builtins
import pickle
import warnings
from functools import cached_property
from typing import Any, Set, Tuple
import pandas as pd
from typing_extensions import Self
import cudf
from cudf._lib.copying import _gather_map_is_valid, gather
from cudf._lib.stream_compaction import (
apply_boolean_mask,
drop_duplicates,
drop_nulls,
)
from cudf._lib.types import size_type_dtype
from cudf.api.extensions import no_default
from cudf.api.types import (
is_bool_dtype,
is_integer,
is_integer_dtype,
is_list_like,
is_scalar,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
)
from cudf.core.abc import Serializable
from cudf.core.column import ColumnBase, column
from cudf.core.column_accessor import ColumnAccessor
from cudf.errors import MixedTypeError
from cudf.utils import ioutils
from cudf.utils.dtypes import can_convert_to_column, is_mixed_with_object_dtype
from cudf.utils.utils import _is_same_name
class BaseIndex(Serializable):
"""Base class for all cudf Index types."""
_accessors: Set[Any] = set()
_data: ColumnAccessor
@property
def _columns(self) -> Tuple[Any, ...]:
raise NotImplementedError
@cached_property
def _values(self) -> ColumnBase:
raise NotImplementedError
def copy(self, deep: bool = True) -> Self:
raise NotImplementedError
def __len__(self):
raise NotImplementedError
@property
def size(self):
# The size of an index is always its length irrespective of dimension.
return len(self)
def astype(self, dtype, copy: bool = True):
"""Create an Index with values cast to dtypes.
The class of a new Index is determined by dtype. When conversion is
impossible, a ValueError exception is raised.
Parameters
----------
dtype : :class:`numpy.dtype`
Use a :class:`numpy.dtype` to cast entire Index object to.
copy : bool, default False
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
Returns
-------
Index
Index with values cast to specified dtype.
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, 3])
>>> index
Int64Index([1, 2, 3], dtype='int64')
>>> index.astype('float64')
Float64Index([1.0, 2.0, 3.0], dtype='float64')
"""
raise NotImplementedError
def argsort(self, *args, **kwargs):
"""Return the integer indices that would sort the index.
Parameters vary by subclass.
"""
raise NotImplementedError
@property
def dtype(self):
raise NotImplementedError
@property
def empty(self):
return self.size == 0
@property
def is_unique(self):
"""Return if the index has unique values."""
raise NotImplementedError
def memory_usage(self, deep=False):
"""Return the memory usage of an object.
Parameters
----------
deep : bool
The deep parameter is ignored and is only included for pandas
compatibility.
Returns
-------
The total bytes used.
"""
raise NotImplementedError
def tolist(self): # noqa: D102
raise TypeError(
"cuDF does not support conversion to host memory "
"via the `tolist()` method. Consider using "
"`.to_arrow().to_pylist()` to construct a Python list."
)
to_list = tolist
@property
def name(self):
"""Returns the name of the Index."""
raise NotImplementedError
@property # type: ignore
def ndim(self): # noqa: D401
"""Number of dimensions of the underlying data, by definition 1."""
return 1
def equals(self, other):
"""
Determine if two Index objects contain the same elements.
Returns
-------
out: bool
True if "other" is an Index and it has the same elements
as calling index; False otherwise.
"""
raise NotImplementedError
def shift(self, periods=1, freq=None):
"""Not yet implemented"""
raise NotImplementedError
@property
def shape(self):
"""Get a tuple representing the dimensionality of the data."""
return (len(self),)
@property
def str(self):
"""Not yet implemented."""
raise NotImplementedError
@property
def values(self):
raise NotImplementedError
def max(self):
"""The maximum value of the index."""
raise NotImplementedError
def min(self):
"""The minimum value of the index."""
raise NotImplementedError
def get_loc(self, key, method=None, tolerance=None):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError()
def __contains__(self, item):
return item in self._values
def _copy_type_metadata(
self, other: Self, *, override_dtypes=None
) -> Self:
raise NotImplementedError
def get_level_values(self, level):
"""
Return an Index of values for requested level.
This is primarily useful to get an individual level of values from a
MultiIndex, but is provided on Index as well for compatibility.
Parameters
----------
level : int or str
It is either the integer position or the name of the level.
Returns
-------
Index
Calling object, as there is only one level in the Index.
See Also
--------
cudf.MultiIndex.get_level_values : Get values for
a level of a MultiIndex.
Notes
-----
For Index, level should be 0, since there are no multiple levels.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["a", "b", "c"])
>>> idx.get_level_values(0)
StringIndex(['a' 'b' 'c'], dtype='object')
"""
if level == self.name:
return self
elif is_integer(level):
if level != 0:
raise IndexError(
f"Cannot get level: {level} " f"for index with 1 level"
)
return self
else:
raise KeyError(f"Requested level with name {level} " "not found")
@classmethod
def deserialize(cls, header, frames):
# Dispatch deserialization to the appropriate index type in case
# deserialization is ever attempted with the base class directly.
idx_type = pickle.loads(header["type-serialized"])
return idx_type.deserialize(header, frames)
@property
def names(self):
"""
Returns a tuple containing the name of the Index.
"""
return (self.name,)
@names.setter
def names(self, values):
if not is_list_like(values):
raise ValueError("Names must be a list-like")
num_values = len(values)
if num_values > 1:
raise ValueError(
"Length of new names must be 1, got %d" % num_values
)
self.name = values[0]
def _clean_nulls_from_index(self):
"""
Convert all NA values (if any) in the Index object
to `<NA>` as a preprocessing step for `__repr__` methods.
This may involve changing the type of the Index object
to a StringIndex, but it is the responsibility of the `__repr__`
methods using this method to replace or handle the representation
of the actual types correctly.
"""
raise NotImplementedError
@property
def is_monotonic(self):
"""Return boolean if values in the object are monotonic_increasing.
This property is an alias for :attr:`is_monotonic_increasing`.
Returns
-------
bool
"""
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"is_monotonic is deprecated and will be removed in a future "
"version. Use is_monotonic_increasing instead.",
FutureWarning,
)
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""Return boolean if values in the object are monotonically increasing.
Returns
-------
bool
"""
raise NotImplementedError
@property
def is_monotonic_decreasing(self):
"""Return boolean if values in the object are monotonically decreasing.
Returns
-------
bool
"""
raise NotImplementedError
@property
def hasnans(self):
"""
Return True if there are any NaNs or nulls.
Returns
-------
out : bool
If Series has at least one NaN or null value, return True,
if not return False.
Examples
--------
>>> import cudf
>>> import numpy as np
>>> index = cudf.Index([1, 2, np.nan, 3, 4], nan_as_null=False)
>>> index
Float64Index([1.0, 2.0, nan, 3.0, 4.0], dtype='float64')
>>> index.hasnans
True
`hasnans` returns `True` for the presence of any `NA` values:
>>> index = cudf.Index([1, 2, None, 3, 4])
>>> index
Int64Index([1, 2, <NA>, 3, 4], dtype='int64')
>>> index.hasnans
True
"""
raise NotImplementedError
@property
def nlevels(self):
"""
Number of levels.
"""
return 1
def _set_names(self, names, inplace=False):
if inplace:
idx = self
else:
idx = self.copy(deep=False)
idx.names = names
if not inplace:
return idx
def set_names(self, names, level=None, inplace=False):
"""
Set Index or MultiIndex name.
Able to set new names partially and by level.
Parameters
----------
names : label or list of label
Name(s) to set.
level : int, label or list of int or label, optional
If the index is a MultiIndex, level(s) to set (None for all
levels). Otherwise level must be None.
inplace : bool, default False
Modifies the object directly, instead of creating a new Index or
MultiIndex.
Returns
-------
Index
The same type as the caller or None if inplace is True.
See Also
--------
cudf.Index.rename : Able to set new names without level.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx.set_names('quarter')
Int64Index([1, 2, 3, 4], dtype='int64', name='quarter')
>>> idx = cudf.MultiIndex.from_product([['python', 'cobra'],
... [2018, 2019]])
>>> idx
MultiIndex([('python', 2018),
('python', 2019),
( 'cobra', 2018),
( 'cobra', 2019)],
)
>>> idx.names
FrozenList([None, None])
>>> idx.set_names(['kind', 'year'], inplace=True)
>>> idx.names
FrozenList(['kind', 'year'])
>>> idx.set_names('species', level=0, inplace=True)
>>> idx.names
FrozenList(['species', 'year'])
"""
if level is not None:
raise ValueError("Level must be None for non-MultiIndex")
if not is_list_like(names):
names = [names]
return self._set_names(names=names, inplace=inplace)
@property
def has_duplicates(self):
return not self.is_unique
def where(self, cond, other=None, inplace=False):
"""
Replace values where the condition is False.
The replacement is taken from other.
Parameters
----------
cond : bool array-like with the same length as self
Condition to select the values on.
other : scalar, or array-like, default None
Replacement if the condition is False.
Returns
-------
cudf.Index
A copy of self with values replaced from other
where the condition is False.
"""
raise NotImplementedError
def factorize(self, sort=False, na_sentinel=None, use_na_sentinel=None):
raise NotImplementedError
def union(self, other, sort=None):
"""
Form the union of two Index objects.
Parameters
----------
other : Index or array-like
sort : bool or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` or `other` has length 0.
* False : do not sort the result.
Returns
-------
union : Index
Examples
--------
Union of an Index
>>> import cudf
>>> import pandas as pd
>>> idx1 = cudf.Index([1, 2, 3, 4])
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
MultiIndex case
>>> idx1 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx2 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]]
... )
... )
>>> idx2
MultiIndex([(3, 'Red'),
(3, 'Green'),
(2, 'Red'),
(2, 'Green')],
)
>>> idx1.union(idx2)
MultiIndex([(1, 'Blue'),
(1, 'Red'),
(2, 'Blue'),
(2, 'Green'),
(2, 'Red'),
(3, 'Green'),
(3, 'Red')],
)
>>> idx1.union(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue'),
(3, 'Red'),
(3, 'Green'),
(2, 'Green')],
)
"""
if not isinstance(other, BaseIndex):
other = cudf.Index(other, name=self.name)
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
if cudf.get_option("mode.pandas_compatible"):
if (
is_bool_dtype(self.dtype) and not is_bool_dtype(other.dtype)
) or (
not is_bool_dtype(self.dtype) and is_bool_dtype(other.dtype)
):
# Bools + other types will result in mixed type.
# This is not yet consistent in pandas and specific to APIs.
raise MixedTypeError("Cannot perform union with mixed types")
if (
is_signed_integer_dtype(self.dtype)
and is_unsigned_integer_dtype(other.dtype)
) or (
is_unsigned_integer_dtype(self.dtype)
and is_signed_integer_dtype(other.dtype)
):
# signed + unsigned types will result in
# mixed type for union in pandas.
raise MixedTypeError("Cannot perform union with mixed types")
if not len(other) or self.equals(other):
common_dtype = cudf.utils.dtypes.find_common_type(
[self.dtype, other.dtype]
)
return self._get_reconciled_name_object(other).astype(common_dtype)
elif not len(self):
common_dtype = cudf.utils.dtypes.find_common_type(
[self.dtype, other.dtype]
)
return other._get_reconciled_name_object(self).astype(common_dtype)
result = self._union(other, sort=sort)
result.name = _get_result_name(self.name, other.name)
return result
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Whether to sort the resulting index.
* False : do not sort the result.
* None : sort the result, except when `self` and `other` are equal
or when the values cannot be compared.
Returns
-------
intersection : Index
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> idx1 = cudf.Index([1, 2, 3, 4])
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
MultiIndex case
>>> idx1 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 3, 4], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx2 = cudf.MultiIndex.from_pandas(
... pd.MultiIndex.from_arrays(
... [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]]
... )
... )
>>> idx1
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(3, 'Red'),
(4, 'Blue')],
)
>>> idx2
MultiIndex([(1, 'Red'),
(1, 'Blue'),
(2, 'Red'),
(2, 'Blue')],
)
>>> idx1.intersection(idx2)
MultiIndex([(1, 'Red'),
(1, 'Blue')],
)
>>> idx1.intersection(idx2, sort=False)
MultiIndex([(1, 'Red'),
(1, 'Blue')],
)
"""
if not can_convert_to_column(other):
raise TypeError("Input must be Index or array-like")
if not isinstance(other, BaseIndex):
other = cudf.Index(
other,
name=getattr(other, "name", self.name),
)
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values of "
f"None or False; {sort} was passed."
)
if not len(self) or not len(other) or self.equals(other):
common_dtype = cudf.utils.dtypes._dtype_pandas_compatible(
cudf.utils.dtypes.find_common_type([self.dtype, other.dtype])
)
lhs = self.unique() if self.has_duplicates else self
rhs = other
if not len(other):
lhs, rhs = rhs, lhs
return lhs._get_reconciled_name_object(rhs).astype(common_dtype)
res_name = _get_result_name(self.name, other.name)
if (self._is_boolean() and other._is_numeric()) or (
self._is_numeric() and other._is_boolean()
):
if isinstance(self, cudf.MultiIndex):
return self[:0].rename(res_name)
else:
return cudf.Index([], name=res_name)
if self.has_duplicates:
lhs = self.unique()
else:
lhs = self
if other.has_duplicates:
rhs = other.unique()
else:
rhs = other
result = lhs._intersection(rhs, sort=sort)
result.name = res_name
return result
def _get_reconciled_name_object(self, other):
"""
If the result of a set operation will be self,
return self, unless the name changes, in which
case make a shallow copy of self.
"""
name = _get_result_name(self.name, other.name)
if not _is_same_name(self.name, name):
return self.rename(name)
return self
def fillna(self, value, downcast=None):
"""
Fill null values with the specified value.
Parameters
----------
value : scalar
Scalar value to use to fill nulls. This value cannot be a
list-likes.
downcast : dict, default is None
This Parameter is currently NON-FUNCTIONAL.
Returns
-------
filled : Index
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, None, 4])
>>> index
Int64Index([1, 2, <NA>, 4], dtype='int64')
>>> index.fillna(3)
Int64Index([1, 2, 3, 4], dtype='int64')
"""
if downcast is not None:
raise NotImplementedError(
"`downcast` parameter is not yet supported"
)
return super().fillna(value=value)
def to_frame(self, index=True, name=no_default):
"""Create a DataFrame with a column containing this Index
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index
name : object, defaults to index.name
The passed name should substitute for the index name (if it has
one).
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
To override the name of the resulting column, specify `name`:
>>> idx.to_frame(index=False, name='zoo')
zoo
0 Ant
1 Bear
2 Cow
"""
if name is None:
warnings.warn(
"Explicitly passing `name=None` currently preserves "
"the Index's name or uses a default name of 0. This "
"behaviour is deprecated, and in the future `None` "
"will be used as the name of the "
"resulting DataFrame column.",
FutureWarning,
)
name = no_default
if name is not no_default:
col_name = name
elif self.name is None:
col_name = 0
else:
col_name = self.name
return cudf.DataFrame(
{col_name: self._values}, index=self if index else None
)
def to_arrow(self):
"""Convert to a suitable Arrow object."""
raise NotImplementedError
def to_cupy(self):
"""Convert to a cupy array."""
raise NotImplementedError
def to_numpy(self):
"""Convert to a numpy array."""
raise NotImplementedError
def any(self):
"""
Return whether any elements is True in Index.
"""
raise NotImplementedError
def isna(self):
"""
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
NA values, such as ``None``, `numpy.NAN` or `cudf.NA`, get
mapped to ``True`` values.
Everything else gets mapped to ``False`` values.
Returns
-------
numpy.ndarray[bool]
A boolean array to indicate which entries are NA.
"""
raise NotImplementedError
def notna(self):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to ``True``.
NA values, such as None or `numpy.NAN`, get mapped to ``False``
values.
Returns
-------
numpy.ndarray[bool]
A boolean array to indicate which entries are not NA.
"""
raise NotImplementedError
def to_pandas(self, nullable=False):
"""
Convert to a Pandas Index.
Parameters
----------
nullable : bool, Default False
If ``nullable`` is ``True``, the resulting index will have
a corresponding nullable Pandas dtype.
If there is no corresponding nullable Pandas dtype present,
the resulting dtype will be a regular pandas dtype.
If ``nullable`` is ``False``, the resulting index will
either convert null values to ``np.nan`` or ``None``
depending on the dtype.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([-3, 10, 15, 20])
>>> idx
Int64Index([-3, 10, 15, 20], dtype='int64')
>>> idx.to_pandas()
Int64Index([-3, 10, 15, 20], dtype='int64')
>>> type(idx.to_pandas())
<class 'pandas.core.indexes.numeric.Int64Index'>
>>> type(idx)
<class 'cudf.core.index.Int64Index'>
"""
raise NotImplementedError
def isin(self, values):
"""Return a boolean array where the index values are in values.
Compute boolean array of whether each index value is found in
the passed set of values. The length of the returned boolean
array matches the length of the index.
Parameters
----------
values : set, list-like, Index
Sought values.
Returns
-------
is_contained : cupy array
CuPy array of boolean values.
Examples
--------
>>> idx = cudf.Index([1,2,3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
Check whether each index value in a list of values.
>>> idx.isin([1, 4])
array([ True, False, False])
"""
# To match pandas behavior, even though only list-like objects are
# supposed to be passed, only scalars throw errors. Other types (like
# dicts) just transparently return False (see the implementation of
# ColumnBase.isin).
raise NotImplementedError
def unique(self):
"""
Return unique values in the index.
Returns
-------
Index without duplicates
"""
raise NotImplementedError
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
Index of resulting Series. If None, defaults to original index.
name : str, optional
Name of resulting Series. If None, defaults to name of original
index.
Returns
-------
Series
The dtype will be based on the type of the Index values.
"""
return cudf.Series._from_data(
self._data,
index=self.copy(deep=False) if index is None else index,
name=self.name if name is None else name,
)
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
return cudf.io.dlpack.to_dlpack(self)
def append(self, other):
"""
Append a collection of Index objects together.
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 10, 100])
>>> idx
Int64Index([1, 2, 10, 100], dtype='int64')
>>> other = cudf.Index([200, 400, 50])
>>> other
Int64Index([200, 400, 50], dtype='int64')
>>> idx.append(other)
Int64Index([1, 2, 10, 100, 200, 400, 50], dtype='int64')
append accepts list of Index objects
>>> idx.append([other, other])
Int64Index([1, 2, 10, 100, 200, 400, 50, 200, 400, 50], dtype='int64')
"""
raise NotImplementedError
def difference(self, other, sort=None):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort the resulting index. By default, the
values are attempted to be sorted, but any TypeError from
incomparable elements is caught by cudf.
* None : Attempt to sort the result, but catch any TypeErrors
from comparing incomparable elements.
* False : Do not sort the result.
Returns
-------
difference : Index
Examples
--------
>>> import cudf
>>> idx1 = cudf.Index([2, 1, 3, 4])
>>> idx1
Int64Index([2, 1, 3, 4], dtype='int64')
>>> idx2 = cudf.Index([3, 4, 5, 6])
>>> idx2
Int64Index([3, 4, 5, 6], dtype='int64')
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
>>> idx1.difference(idx2, sort=False)
Int64Index([2, 1], dtype='int64')
"""
if not can_convert_to_column(other):
raise TypeError("Input must be Index or array-like")
if sort not in {None, False}:
raise ValueError(
f"The 'sort' keyword only takes the values "
f"of None or False; {sort} was passed."
)
other = cudf.Index(other, name=getattr(other, "name", self.name))
if not len(other):
return self._get_reconciled_name_object(other)
elif self.equals(other):
return self[:0]._get_reconciled_name_object(other)
res_name = _get_result_name(self.name, other.name)
if is_mixed_with_object_dtype(self, other):
difference = self.copy()
else:
other = other.copy(deep=False)
difference = cudf.core.index._index_from_data(
cudf.DataFrame._from_data({"None": self._column})
.merge(
cudf.DataFrame._from_data({"None": other._column}),
how="leftanti",
on="None",
)
._data
)
if self.dtype != other.dtype:
difference = difference.astype(self.dtype)
difference.name = res_name
if sort is None and len(other):
return difference.sort_values()
return difference
def is_numeric(self):
"""
Check if the Index only consists of numeric data.
.. deprecated:: 23.04
Use `cudf.api.types.is_any_real_numeric_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of numeric data.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4.0])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_numeric()
True
>>> idx = cudf.Index([1, 2, 3, 4.0, np.nan])
>>> idx.is_numeric()
True
>>> idx = cudf.Index(["Apple", "cold"])
>>> idx.is_numeric()
False
"""
# Do not remove until pandas removes this.
warnings.warn(
f"{type(self).__name__}.is_numeric is deprecated. "
"Use cudf.api.types.is_any_real_numeric_dtype instead",
FutureWarning,
)
return self._is_numeric()
def _is_numeric(self):
raise NotImplementedError
def is_boolean(self):
"""
Check if the Index only consists of booleans.
.. deprecated:: 23.04
Use `cudf.api.types.is_bool_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of booleans.
See Also
--------
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([True, False, True])
>>> idx.is_boolean()
True
>>> idx = cudf.Index(["True", "False", "True"])
>>> idx.is_boolean()
False
>>> idx = cudf.Index([1, 2, 3])
>>> idx.is_boolean()
False
"""
# Do not remove until pandas removes this.
warnings.warn(
f"{type(self).__name__}.is_boolean is deprecated. "
"Use cudf.api.types.is_bool_dtype instead",
FutureWarning,
)
return self._is_boolean()
def _is_boolean(self):
raise NotImplementedError
def is_integer(self):
"""
Check if the Index only consists of integers.
.. deprecated:: 23.04
Use `cudf.api.types.is_integer_dtype` instead.
Returns
-------
bool
Whether or not the Index only consists of integers.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_integer()
True
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_integer()
False
>>> idx = cudf.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_integer()
False
"""
# Do not remove until pandas removes this.
warnings.warn(
f"{type(self).__name__}.is_integer is deprecated. "
"Use cudf.api.types.is_integer_dtype instead",
FutureWarning,
)
return self._is_integer()
def _is_integer(self):
raise NotImplementedError
def is_floating(self):
"""
Check if the Index is a floating type.
The Index may consist of only floats, NaNs, or a mix of floats,
integers, or NaNs.
.. deprecated:: 23.04
Use `cudf.api.types.is_float_dtype` instead.
Returns
-------
bool
Whether or not the Index consists of only floats, NaNs, or
a mix of floats, integers, or NaNs.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_floating()
True
>>> idx = cudf.Index([1.0, 2.0, np.nan, 4.0])
>>> idx.is_floating()
True
>>> idx = cudf.Index([1, 2, 3, 4, np.nan], nan_as_null=False)
>>> idx.is_floating()
True
>>> idx = cudf.Index([1, 2, 3, 4])
>>> idx.is_floating()
False
"""
# Do not remove until pandas removes this.
warnings.warn(
f"{type(self).__name__}.is_floating is deprecated. "
"Use cudf.api.types.is_float_dtype instead",
FutureWarning,
)
return self._is_floating()
def _is_floating(self):
raise NotImplementedError
def is_object(self):
"""
Check if the Index is of the object dtype.
.. deprecated:: 23.04
Use `cudf.api.types.is_object_dtype` instead.
Returns
-------
bool
Whether or not the Index is of the object dtype.
See Also
--------
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_categorical : Check if the Index holds categorical data.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["Apple", "Mango", "Watermelon"])
>>> idx.is_object()
True
>>> idx = cudf.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_object()
False
>>> idx = cudf.Index([1.0, 2.0, 3.0, 4.0])
>>> idx.is_object()
False
"""
# Do not remove until pandas removes this.
warnings.warn(
f"{type(self).__name__}.is_object is deprecated. "
"Use cudf.api.types.is_object_dtype instead",
FutureWarning,
)
return self._is_object()
def _is_object(self):
raise NotImplementedError
def is_categorical(self):
"""
Check if the Index holds categorical data.
.. deprecated:: 23.04
Use `cudf.api.types.is_categorical_dtype` instead.
Returns
-------
bool
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_interval : Check if the Index holds Interval objects.
Examples
--------
>>> import cudf
>>> idx = cudf.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = cudf.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = cudf.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
# Do not remove until pandas removes this.
warnings.warn(
f"{type(self).__name__}.is_categorical is deprecated. "
"Use cudf.api.types.is_categorical_dtype instead",
FutureWarning,
)
return self._is_categorical()
def _is_categorical(self):
raise NotImplementedError
def is_interval(self):
"""
Check if the Index holds Interval objects.
.. deprecated:: 23.04
Use `cudf.api.types.is_interval_dtype` instead.
Returns
-------
bool
Whether or not the Index holds Interval objects.
See Also
--------
IntervalIndex : Index for Interval objects.
is_boolean : Check if the Index only consists of booleans.
is_integer : Check if the Index only consists of integers.
is_floating : Check if the Index is a floating type.
is_numeric : Check if the Index only consists of numeric data.
is_object : Check if the Index is of the object dtype.
is_categorical : Check if the Index holds categorical data.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> idx = cudf.from_pandas(
... pd.Index([pd.Interval(left=0, right=5),
... pd.Interval(left=5, right=10)])
... )
>>> idx.is_interval()
True
>>> idx = cudf.Index([1, 3, 5, 7])
>>> idx.is_interval()
False
"""
# Do not remove until pandas removes this.
warnings.warn(
f"{type(self).__name__}.is_interval is deprecated. "
"Use cudf.api.types.is_interval_dtype instead",
FutureWarning,
)
return self._is_interval()
def _is_interval(self):
raise NotImplementedError
def _union(self, other, sort=None):
# TODO: As a future optimization we should explore
# not doing `to_frame`
self_df = self.to_frame(index=False, name=0)
other_df = other.to_frame(index=False, name=0)
self_df["order"] = self_df.index
other_df["order"] = other_df.index
res = self_df.merge(other_df, on=[0], how="outer")
res = res.sort_values(
by=res._data.to_pandas_index()[1:], ignore_index=True
)
union_result = cudf.core.index._index_from_data({0: res._data[0]})
if sort is None and len(other):
return union_result.sort_values()
return union_result
def _intersection(self, other, sort=None):
intersection_result = cudf.core.index._index_from_data(
cudf.DataFrame._from_data({"None": self.unique()._column})
.merge(
cudf.DataFrame._from_data({"None": other.unique()._column}),
how="inner",
on="None",
)
._data
)
if sort is None and len(other):
return intersection_result.sort_values()
return intersection_result
def sort_values(
self,
return_indexer=False,
ascending=True,
na_position="last",
key=None,
):
"""
Return a sorted copy of the index, and optionally return the indices
that sorted the index itself.
Parameters
----------
return_indexer : bool, default False
Should the indices that would sort the index be returned.
ascending : bool, default True
Should the index values be sorted in an ascending order.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at
the end.
key : None, optional
This parameter is NON-FUNCTIONAL.
Returns
-------
sorted_index : Index
Sorted copy of the index.
indexer : cupy.ndarray, optional
The indices that the index itself was sorted by.
See Also
--------
cudf.Series.min : Sort values of a Series.
cudf.DataFrame.sort_values : Sort values in a DataFrame.
Examples
--------
>>> import cudf
>>> idx = cudf.Index([10, 100, 1, 1000])
>>> idx
Int64Index([10, 100, 1, 1000], dtype='int64')
Sort values in ascending order (default behavior).
>>> idx.sort_values()
Int64Index([1, 10, 100, 1000], dtype='int64')
Sort values in descending order, and also get the indices `idx` was
sorted by.
>>> idx.sort_values(ascending=False, return_indexer=True)
(Int64Index([1000, 100, 10, 1], dtype='int64'), array([3, 1, 0, 2],
dtype=int32))
Sorting values in a MultiIndex:
>>> midx = cudf.MultiIndex(
... levels=[[1, 3, 4, -10], [1, 11, 5]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> midx
MultiIndex([( 1, 1),
( 1, 5),
( 3, 11),
( 4, 11),
(-10, 1)],
names=['x', 'y'])
>>> midx.sort_values()
MultiIndex([(-10, 1),
( 1, 1),
( 1, 5),
( 3, 11),
( 4, 11)],
names=['x', 'y'])
>>> midx.sort_values(ascending=False)
MultiIndex([( 4, 11),
( 3, 11),
( 1, 5),
( 1, 1),
(-10, 1)],
names=['x', 'y'])
"""
if key is not None:
raise NotImplementedError("key parameter is not yet implemented.")
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
indices = self.argsort(ascending=ascending, na_position=na_position)
index_sorted = self.take(indices)
if return_indexer:
return index_sorted, indices
else:
return index_sorted
def join(
self, other, how="left", level=None, return_indexers=False, sort=False
):
"""
Compute join_index and indexers to conform data structures
to the new index.
Parameters
----------
other : Index.
how : {'left', 'right', 'inner', 'outer'}
return_indexers : bool, default False
sort : bool, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword).
Returns
-------
Index
Examples
--------
>>> import cudf
>>> lhs = cudf.DataFrame({
... "a": [2, 3, 1],
... "b": [3, 4, 2],
... }).set_index(['a', 'b']).index
>>> lhs
MultiIndex([(2, 3),
(3, 4),
(1, 2)],
names=['a', 'b'])
>>> rhs = cudf.DataFrame({"a": [1, 4, 3]}).set_index('a').index
>>> rhs
Int64Index([1, 4, 3], dtype='int64', name='a')
>>> lhs.join(rhs, how='inner')
MultiIndex([(3, 4),
(1, 2)],
names=['a', 'b'])
"""
if return_indexers is not False:
raise NotImplementedError("return_indexers is not implemented")
self_is_multi = isinstance(self, cudf.MultiIndex)
other_is_multi = isinstance(other, cudf.MultiIndex)
if level is not None:
if self_is_multi and other_is_multi:
raise TypeError(
"Join on level between two MultiIndex objects is ambiguous"
)
if not is_scalar(level):
raise ValueError("level should be an int or a label only")
if other_is_multi:
if how == "left":
how = "right"
elif how == "right":
how = "left"
rhs = self.copy(deep=False)
lhs = other.copy(deep=False)
else:
lhs = self.copy(deep=False)
rhs = other.copy(deep=False)
same_names = lhs.names == rhs.names
# There should be no `None` values in the joined indices, so
# the join is essentially `left`/`right` or `inner` in the
# MultiIndex case.
if isinstance(lhs, cudf.MultiIndex):
on = (
lhs._data.select_by_index(level).names[0]
if isinstance(level, int)
else level
)
if on is not None:
rhs.names = (on,)
on = rhs.names[0]
if how == "outer":
how = "left"
elif how == "right":
how = "inner"
else:
# Both are normal indices
on = lhs.names[0]
rhs.names = lhs.names
lhs = lhs.to_frame()
rhs = rhs.to_frame()
output = lhs.merge(rhs, how=how, on=on, sort=sort)
# If both inputs were MultiIndexes, the output is a MultiIndex.
# Otherwise, the output is only a MultiIndex if there are multiple
# columns
if self_is_multi and other_is_multi:
return cudf.MultiIndex._from_data(output._data)
else:
idx = cudf.core.index._index_from_data(output._data)
idx.name = self.name if same_names else None
return idx
def rename(self, name, inplace=False):
"""
Alter Index name.
Defaults to returning new index.
Parameters
----------
name : label
Name(s) to set.
Returns
-------
Index
Examples
--------
>>> import cudf
>>> index = cudf.Index([1, 2, 3], name='one')
>>> index
Int64Index([1, 2, 3], dtype='int64', name='one')
>>> index.name
'one'
>>> renamed_index = index.rename('two')
>>> renamed_index
Int64Index([1, 2, 3], dtype='int64', name='two')
>>> renamed_index.name
'two'
"""
if inplace is True:
self.name = name
return None
else:
out = self.copy(deep=True)
out.name = name
return out
def _indices_of(self, value) -> cudf.core.column.NumericalColumn:
"""
Return indices corresponding to value
Parameters
----------
value
Value to look for in index
Returns
-------
Column of indices
"""
raise NotImplementedError
def find_label_range(self, loc: slice) -> slice:
"""
Translate a label-based slice to an index-based slice
Parameters
----------
loc
slice to search for.
Notes
-----
As with all label-based searches, the slice is right-closed.
Returns
-------
New slice translated into integer indices of the index (right-open).
"""
start = loc.start
stop = loc.stop
step = 1 if loc.step is None else loc.step
if step < 0:
start_side, stop_side = "right", "left"
else:
start_side, stop_side = "left", "right"
istart = (
None
if start is None
else self.get_slice_bound(start, side=start_side)
)
istop = (
None
if stop is None
else self.get_slice_bound(stop, side=stop_side)
)
if step < 0:
# Fencepost
istart = None if istart is None else max(istart - 1, 0)
istop = None if (istop is None or istop == 0) else istop - 1
return slice(istart, istop, step)
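# Illustrative example (a sketch, assuming a monotonically increasing
# cudf.Index): the right-closed label slice 3:5 translates to the
# right-open positional slice 1:3.
#
#     >>> import cudf
#     >>> idx = cudf.Index([1, 3, 5, 7])
#     >>> idx.find_label_range(slice(3, 5))
#     slice(1, 3, 1)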
def searchsorted(
self,
value,
side: builtins.str = "left",
ascending: bool = True,
na_position: builtins.str = "last",
):
"""Find index where elements should be inserted to maintain order
Parameters
----------
value :
Value to be hypothetically inserted into Self
side : str {'left', 'right'} optional, default 'left'
If 'left', the index of the first suitable location found is given
If 'right', return the last such index
ascending : bool optional, default True
Index is in ascending order (otherwise descending)
na_position : str {'last', 'first'} optional, default 'last'
Position of null values in sorted order
Returns
-------
Insertion point.
Notes
-----
As a precondition the index must be sorted in the same order
as requested by the `ascending` flag.
"""
raise NotImplementedError
def get_slice_bound(self, label, side: builtins.str, kind=None) -> int:
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
int
Index of label.
"""
if kind is not None:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"'kind' argument in get_slice_bound is deprecated and will be "
"removed in a future version.",
FutureWarning,
)
if side not in {"left", "right"}:
raise ValueError(f"Invalid side argument {side}")
if self.is_monotonic_increasing or self.is_monotonic_decreasing:
return self.searchsorted(
label, side=side, ascending=self.is_monotonic_increasing
)
else:
try:
left, right = self._values._find_first_and_last(label)
except ValueError:
raise KeyError(f"{label=} not in index")
if left != right:
raise KeyError(
f"Cannot get slice bound for non-unique label {label=}"
)
if side == "left":
return left
else:
return right + 1
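# Illustrative example (a sketch): on a monotonically increasing index the
# bound is located via searchsorted, so duplicate labels widen the
# right-side bound.
#
#     >>> import cudf
#     >>> idx = cudf.Index([1, 3, 3, 5])
#     >>> idx.get_slice_bound(3, side="left")   # -> 1
#     >>> idx.get_slice_bound(3, side="right")  # -> 3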
def __array_function__(self, func, types, args, kwargs):
# check if the function is implemented for the current type
cudf_index_module = type(self)
for submodule in func.__module__.split(".")[1:]:
# point cudf_index_module to the correct submodule
if hasattr(cudf_index_module, submodule):
cudf_index_module = getattr(cudf_index_module, submodule)
else:
return NotImplemented
fname = func.__name__
handled_types = [BaseIndex, cudf.Series]
# check if we don't handle any of the types (including sub-class)
for t in types:
if not any(
issubclass(t, handled_type) for handled_type in handled_types
):
return NotImplemented
if hasattr(cudf_index_module, fname):
cudf_func = getattr(cudf_index_module, fname)
# Handle case if cudf_func is same as numpy function
if cudf_func is func:
return NotImplemented
else:
result = cudf_func(*args, **kwargs)
if fname == "unique":
# NumPy expects a sorted result for `unique`, which is not
# guaranteed by cudf.Index.unique.
result = result.sort_values()
return result
else:
return NotImplemented
@classmethod
def from_pandas(cls, index, nan_as_null=no_default):
"""
Convert from a Pandas Index.
Parameters
----------
index : Pandas Index object
A Pandas Index object which has to be converted
to cuDF Index.
nan_as_null : bool, default None
If ``None``/``True``, converts ``np.nan`` values
to ``null`` values.
If ``False``, leaves ``np.nan`` values as is.
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> import numpy as np
>>> data = [10, 20, 30, np.nan]
>>> pdi = pd.Index(data)
>>> cudf.Index.from_pandas(pdi)
Float64Index([10.0, 20.0, 30.0, <NA>], dtype='float64')
>>> cudf.Index.from_pandas(pdi, nan_as_null=False)
Float64Index([10.0, 20.0, 30.0, nan], dtype='float64')
"""
if nan_as_null is no_default:
nan_as_null = (
False if cudf.get_option("mode.pandas_compatible") else None
)
if not isinstance(index, pd.Index):
raise TypeError("not a pandas.Index")
ind = cudf.Index(column.as_column(index, nan_as_null=nan_as_null))
ind.name = index.name
return ind
@property
def _constructor_expanddim(self):
return cudf.MultiIndex
def drop_duplicates(
self,
keep="first",
nulls_are_equal=True,
):
"""
Drop duplicate rows in index.
keep : {"first", "last", False}, default "first"
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
nulls_are_equal: bool, default True
Null elements are considered equal to other null elements.
"""
# This relies on the fact that every `Index` (except `RangeIndex`)
# is also a `Frame`.
return self._from_columns_like_self(
drop_duplicates(
list(self._columns),
keys=range(len(self._data)),
keep=keep,
nulls_are_equal=nulls_are_equal,
),
self._column_names,
)
def duplicated(self, keep="first"):
"""
Indicate duplicate index values.
Duplicated values are indicated as ``True`` values in the resulting
array. Either all duplicates, all except the first, or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
The value or values in a set of duplicates to mark as missing.
- ``'first'`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``'last'`` : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
cupy.ndarray[bool]
See Also
--------
Series.duplicated : Equivalent method on cudf.Series.
DataFrame.duplicated : Equivalent method on cudf.DataFrame.
Index.drop_duplicates : Remove duplicate values from Index.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set to False and all others to True:
>>> import cudf
>>> idx = cudf.Index(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> idx.duplicated()
array([False, False, True, False, True])
which is equivalent to
>>> idx.duplicated(keep='first')
array([False, False, True, False, True])
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True:
>>> idx.duplicated(keep='last')
array([ True, False, True, False, False])
By setting keep to ``False``, all duplicates are True:
>>> idx.duplicated(keep=False)
array([ True, False, True, False, True])
"""
return self.to_series().duplicated(keep=keep).to_cupy()
def dropna(self, how="any"):
"""
Drop null rows from Index.
how : {"any", "all"}, default "any"
Specifies how to decide whether to drop a row.
"any" (default) drops rows containing at least
one null value. "all" drops only rows containing
*all* null values.
"""
# This is to be consistent with IndexedFrame.dropna to handle nans
# as nulls by default
data_columns = [
col.nans_to_nulls()
if isinstance(col, cudf.core.column.NumericalColumn)
else col
for col in self._columns
]
return self._from_columns_like_self(
drop_nulls(
data_columns,
how=how,
keys=range(len(data_columns)),
),
self._column_names,
)
def _gather(self, gather_map, nullify=False, check_bounds=True):
"""Gather rows of index specified by indices in `gather_map`.
Skip bounds checking if check_bounds is False.
Set rows to null for all out of bound indices if nullify is `True`.
"""
gather_map = cudf.core.column.as_column(gather_map)
# TODO: For performance, the check and conversion of gather map should
# be done by the caller. This check will be removed in a future release.
if not is_integer_dtype(gather_map.dtype):
gather_map = gather_map.astype(size_type_dtype)
if not _gather_map_is_valid(
gather_map, len(self), check_bounds, nullify
):
raise IndexError("Gather map index is out of bounds.")
return self._from_columns_like_self(
gather(list(self._columns), gather_map, nullify=nullify),
self._column_names,
)
def take(self, indices, axis=0, allow_fill=True, fill_value=None):
"""Return a new index containing the rows specified by *indices*
Parameters
----------
indices : array-like
Array of ints indicating which positions to take.
axis : int
The axis over which to select values, always 0.
allow_fill : Unsupported
fill_value : Unsupported
Returns
-------
out : Index
New object with desired subset of rows.
Examples
--------
>>> idx = cudf.Index(['a', 'b', 'c', 'd', 'e'])
>>> idx.take([2, 0, 4, 3])
StringIndex(['c' 'a' 'e' 'd'], dtype='object')
"""
if axis not in {0, "index"}:
raise NotImplementedError(
"Gather along column axis is not yet supported."
)
if not allow_fill or fill_value is not None:
raise NotImplementedError(
"`allow_fill` and `fill_value` are unsupported."
)
return self._gather(indices)
def _apply_boolean_mask(self, boolean_mask):
"""Apply boolean mask to each row of `self`.
Rows corresponding to `False` are dropped.
"""
boolean_mask = cudf.core.column.as_column(boolean_mask)
if not is_bool_dtype(boolean_mask.dtype):
raise ValueError("boolean_mask is not boolean type.")
return self._from_columns_like_self(
apply_boolean_mask(list(self._columns), boolean_mask),
column_names=self._column_names,
)
def repeat(self, repeats, axis=None):
"""Repeat elements of a Index.
Returns a new Index where each element of the current Index is repeated
consecutively a given number of times.
Parameters
----------
repeats : int, or array of ints
The number of repetitions for each element. This should
be a non-negative integer. Repeating 0 times will return
an empty object.
Returns
-------
Index
A newly created object of same type as caller with repeated
elements.
Examples
--------
>>> index = cudf.Index([10, 22, 33, 55])
>>> index
Int64Index([10, 22, 33, 55], dtype='int64')
>>> index.repeat(5)
Int64Index([10, 10, 10, 10, 10, 22, 22, 22, 22, 22, 33,
33, 33, 33, 33, 55, 55, 55, 55, 55],
dtype='int64')
"""
raise NotImplementedError
def _split_columns_by_levels(self, levels):
if isinstance(levels, int) and levels > 0:
raise ValueError(f"Out of bound level: {levels}")
return (
[self._data[self.name]],
[],
["index" if self.name is None else self.name],
[],
)
def _split(self, splits):
raise NotImplementedError
def _get_result_name(left_name, right_name):
return left_name if _is_same_name(left_name, right_name) else None
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/algorithms.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import warnings
import cupy as cp
import numpy as np
from cudf.core.column import as_column
from cudf.core.copy_types import BooleanMask
from cudf.core.index import RangeIndex, as_index
from cudf.core.indexed_frame import IndexedFrame
from cudf.core.scalar import Scalar
from cudf.options import get_option
from cudf.utils.dtypes import can_convert_to_column
def factorize(
values, sort=False, na_sentinel=None, use_na_sentinel=None, size_hint=None
):
"""Encode the input values as integer labels
Parameters
----------
values: Series, Index, or CuPy array
The data to be factorized.
sort : bool, default False
Sort uniques and shuffle codes to maintain the relationship.
na_sentinel : number, default -1
Value to indicate missing category.
.. deprecated:: 23.04
The na_sentinel argument is deprecated and will be removed in
a future version of cudf. Specify use_na_sentinel as
either True or False.
use_na_sentinel : bool, default True
If True, the sentinel -1 will be used for NA values.
If False, NA values will be encoded as non-negative
integers and will not drop the NA from the uniques
of the values.
Returns
-------
(labels, cats) : (cupy.ndarray, cupy.ndarray or Index)
- *labels* contains the encoded values
- *cats* contains the categories, ordered so that the item at
  position ``i`` corresponds to code ``i``.
See Also
--------
cudf.Series.factorize : Encode the input values of Series.
Examples
--------
>>> import cudf
>>> import numpy as np
>>> data = cudf.Series(['a', 'c', 'c'])
>>> codes, uniques = cudf.factorize(data)
>>> codes
array([0, 1, 1], dtype=int8)
>>> uniques
StringIndex(['a' 'c'], dtype='object')
When ``use_na_sentinel=True`` (the default), missing values are indicated
in the `codes` with the sentinel value ``-1`` and missing values are not
included in `uniques`.
>>> codes, uniques = cudf.factorize(['b', None, 'a', 'c', 'b'])
>>> codes
array([ 1, -1, 0, 2, 1], dtype=int8)
>>> uniques
StringIndex(['a' 'b' 'c'], dtype='object')
If NA is in the values, and we want to include NA in the uniques of the
values, it can be achieved by setting ``use_na_sentinel=False``.
>>> values = np.array([1, 2, 1, np.nan])
>>> codes, uniques = cudf.factorize(values)
>>> codes
array([ 0, 1, 0, -1], dtype=int8)
>>> uniques
Float64Index([1.0, 2.0], dtype='float64')
>>> codes, uniques = cudf.factorize(values, use_na_sentinel=False)
>>> codes
array([1, 2, 1, 0], dtype=int8)
>>> uniques
Float64Index([<NA>, 1.0, 2.0], dtype='float64')
"""
# TODO: Drop `na_sentinel` in the next release immediately after
# pandas 2.0 upgrade.
if na_sentinel is not None and use_na_sentinel is not None:
raise ValueError(
"Cannot specify both `na_sentinel` and `use_na_sentile`; "
f"got `na_sentinel={na_sentinel}` and "
f"`use_na_sentinel={use_na_sentinel}`"
)
return_cupy_array = isinstance(values, cp.ndarray)
if not can_convert_to_column(values):
raise TypeError(
"'values' can only be a Series, Index, or CuPy array, "
f"got {type(values)}"
)
values = as_column(values)
if na_sentinel is None:
na_sentinel = (
-1
if use_na_sentinel is None or use_na_sentinel
else Scalar(None, dtype=values.dtype)
)
else:
if na_sentinel is None:
msg = (
"Specifying `na_sentinel=None` is deprecated, specify "
"`use_na_sentinel=False` instead."
)
elif na_sentinel == -1:
msg = (
"Specifying `na_sentinel=-1` is deprecated, specify "
"`use_na_sentinel=True` instead."
)
else:
msg = (
"Specifying the specific value to use for `na_sentinel` is "
"deprecated and will be removed in a future version of cudf. "
"Specify `use_na_sentinel=True` to use the sentinel value -1, "
"and `use_na_sentinel=False` to encode NA values.",
)
# Do not remove until pandas 2.0 support is added.
warnings.warn(msg, FutureWarning)
if size_hint:
warnings.warn("size_hint is not applicable for cudf.factorize")
if use_na_sentinel is None or use_na_sentinel:
cats = values.dropna()
else:
cats = values
cats = cats.unique().astype(values.dtype)
if sort:
cats = cats.sort_values()
labels = values._label_encoding(
cats=cats,
na_sentinel=Scalar(na_sentinel),
dtype="int64" if get_option("mode.pandas_compatible") else None,
).values
return labels, cats.values if return_cupy_array else as_index(cats)
def _linear_interpolation(column, index=None):
"""
Interpolate over a float column. Implicitly assumes that values are
evenly spaced with respect to the x-axis. For example, the data
[1.0, NaN, 3.0] will be interpolated assuming the NaN is halfway
between the two valid values, yielding [1.0, 2.0, 3.0].
"""
index = RangeIndex(start=0, stop=len(column), step=1)
return _index_or_values_interpolation(column, index=index)
def _index_or_values_interpolation(column, index=None):
"""
Interpolate over a float column, assuming a linear interpolation
strategy that uses the index of the data to denote the spacing of the
x values. For example, the data [1.0, NaN, 4.0] with index [1, 3, 4]
would result in [1.0, 3.0, 4.0].
"""
# figure out where the nans are
mask = cp.isnan(column)
# trivial cases, all nan or no nans
num_nan = mask.sum()
if num_nan == 0 or num_nan == len(column):
return column
to_interp = IndexedFrame(data={None: column}, index=index)
known_x_and_y = to_interp._apply_boolean_mask(
BooleanMask(~mask, len(to_interp))
)
known_x = known_x_and_y._index._column.values
known_y = known_x_and_y._data.columns[0].values
result = cp.interp(to_interp._index.values, known_x, known_y)
# find the first nan
first_nan_idx = (mask == 0).argmax().item()
result[:first_nan_idx] = np.nan
return result
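# Worked example of the strategy above (a sketch of the equivalent CuPy call,
# not the full column machinery): for data [1.0, NaN, 4.0] with index
# [1, 3, 4], the known points are x=[1, 4], y=[1.0, 4.0] and the NaN is
# filled by evaluating the line at x=3.
#
#     >>> import cupy as cp
#     >>> cp.interp(cp.asarray([1, 3, 4]), cp.asarray([1, 4]),
#     ...           cp.asarray([1.0, 4.0]))
#     array([1., 3., 4.])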
def get_column_interpolator(method):
interpolator = {
"linear": _linear_interpolation,
"index": _index_or_values_interpolation,
"values": _index_or_values_interpolation,
}.get(method, None)
if not interpolator:
raise ValueError(f"Interpolation method `{method}` not found")
return interpolator
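# Minimal usage sketch of the dispatch above:
#
#     >>> get_column_interpolator("linear") is _linear_interpolation
#     True
#     >>> get_column_interpolator("cubic")
#     Traceback (most recent call last):
#         ...
#     ValueError: Interpolation method `cubic` not found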
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/dtypes.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import decimal
import operator
import pickle
import textwrap
from functools import cached_property
from typing import Any, Callable, Dict, List, Tuple, Type, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from pandas.api import types as pd_types
from pandas.api.extensions import ExtensionDtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype as pd_CategoricalDtype,
CategoricalDtypeType as pd_CategoricalDtypeType,
)
import cudf
from cudf._typing import Dtype
from cudf.core._compat import PANDAS_GE_150
from cudf.core.abc import Serializable
from cudf.core.buffer import Buffer
from cudf.utils.docutils import doc_apply
if PANDAS_GE_150:
from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
else:
from pandas.core.arrays._arrow_utils import ArrowIntervalType
def dtype(arbitrary):
"""
Return the cuDF-supported dtype corresponding to `arbitrary`.
Parameters
----------
arbitrary: dtype or scalar-like
Returns
-------
dtype: the cuDF-supported dtype that best matches `arbitrary`
"""
# first, check if `arbitrary` is one of our extension types:
if isinstance(arbitrary, cudf.core.dtypes._BaseDtype):
return arbitrary
# next, try interpreting arbitrary as a NumPy dtype that we support:
try:
np_dtype = np.dtype(arbitrary)
if np_dtype.kind in ("OU"):
return np.dtype("object")
except TypeError:
pass
else:
if np_dtype not in cudf._lib.types.SUPPORTED_NUMPY_TO_LIBCUDF_TYPES:
raise TypeError(f"Unsupported type {np_dtype}")
return np_dtype
# use `pandas_dtype` to try and interpret
# `arbitrary` as a Pandas extension type.
# Return the corresponding NumPy/cuDF type.
pd_dtype = pd.api.types.pandas_dtype(arbitrary)
if cudf.get_option(
"mode.pandas_compatible"
) and cudf.api.types._is_pandas_nullable_extension_dtype(pd_dtype):
raise NotImplementedError("not supported")
try:
return dtype(pd_dtype.numpy_dtype)
except AttributeError:
if isinstance(pd_dtype, pd.CategoricalDtype):
return cudf.CategoricalDtype.from_pandas(pd_dtype)
elif isinstance(pd_dtype, pd.StringDtype):
return np.dtype("object")
elif isinstance(pd_dtype, pd.IntervalDtype):
return cudf.IntervalDtype.from_pandas(pd_dtype)
elif isinstance(pd_dtype, pd.DatetimeTZDtype):
return pd_dtype
else:
raise TypeError(
f"Cannot interpret {arbitrary} as a valid cuDF dtype"
)
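# Illustrative examples of the resolution above (a sketch, assuming the
# default, non pandas-compatible mode):
#
#     >>> import numpy as np
#     >>> import cudf
#     >>> cudf.dtype(np.int32)      # supported NumPy dtypes pass through
#     dtype('int32')
#     >>> cudf.dtype("str")         # string kinds map to object dtype
#     dtype('O')
#     >>> cudf.dtype("category")    # pandas extension types are converted
#     CategoricalDtype(categories=None, ordered=False)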
def _decode_type(
cls: Type,
header: dict,
frames: list,
is_valid_class: Callable[[Type, Type], bool] = operator.is_,
) -> Tuple[dict, list, Type]:
"""Decode metadata-encoded type and check validity
Parameters
----------
cls : type
class performing deserialization
header : dict
metadata for deserialization
frames : list
buffers containing data for deserialization
is_valid_class : Callable
function to call to check if the encoded class type is valid for
serialization by `cls` (default is to check type equality), called
as `is_valid_class(decoded_class, cls)`.
Returns
-------
tuple
Tuple of validated headers, frames, and the decoded class
constructor.
Raises
------
AssertionError
if the number of frames doesn't match the count encoded in the
headers, or `is_valid_class` is not true.
"""
assert header["frame_count"] == len(frames), (
f"Deserialization expected {header['frame_count']} frames, "
f"but received {len(frames)}."
)
klass = pickle.loads(header["type-serialized"])
assert is_valid_class(
klass, cls
), f"Header-encoded {klass=} does not match decoding {cls=}."
return header, frames, klass
class _BaseDtype(ExtensionDtype, Serializable):
# Base type for all cudf-specific dtypes
pass
class CategoricalDtype(_BaseDtype):
"""
Type for categorical data with the categories and orderedness.
Parameters
----------
categories : sequence, optional
Must be unique, and must not contain any nulls.
The categories are stored in an Index,
and if an index is provided the dtype of that index will be used.
ordered : bool or None, default False
Whether or not this categorical is treated as an ordered categorical.
None can be used to maintain the ordered value of existing categoricals
when used in operations that combine categoricals, e.g. astype, and
will resolve to False if there is no existing ordered to maintain.
Attributes
----------
categories
ordered
Methods
-------
from_pandas
to_pandas
Examples
--------
>>> import cudf
>>> dtype = cudf.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> cudf.Series(['a', 'b', 'a', 'c'], dtype=dtype)
0 a
1 b
2 a
3 <NA>
dtype: category
Categories (2, object): ['b' < 'a']
"""
def __init__(self, categories=None, ordered: bool = False) -> None:
self._categories = self._init_categories(categories)
self._ordered = ordered
@property
def categories(self) -> "cudf.core.index.GenericIndex":
"""
An ``Index`` containing the unique categories allowed.
Examples
--------
>>> import cudf
>>> dtype = cudf.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> dtype.categories
StringIndex(['b' 'a'], dtype='object')
"""
if self._categories is None:
return cudf.core.index.as_index(
cudf.core.column.column_empty(0, dtype="object", masked=False)
)
return cudf.core.index.as_index(self._categories, copy=False)
@property
def type(self):
return self._categories.dtype.type
@property
def name(self):
return "category"
@property
def str(self):
return "|O08"
@property
def ordered(self) -> bool:
"""
Whether the categories have an ordered relationship.
"""
return self._ordered
@ordered.setter
def ordered(self, value) -> None:
self._ordered = value
@classmethod
def from_pandas(cls, dtype: pd.CategoricalDtype) -> "CategoricalDtype":
"""
Convert a ``pandas.CategoricalDtype`` to ``cudf.CategoricalDtype``
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> pd_dtype = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> pd_dtype
CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> cudf_dtype = cudf.CategoricalDtype.from_pandas(pd_dtype)
>>> cudf_dtype
CategoricalDtype(categories=['b', 'a'], ordered=True)
"""
return CategoricalDtype(
categories=dtype.categories, ordered=dtype.ordered
)
def to_pandas(self) -> pd.CategoricalDtype:
"""
Convert a ``cudf.CategoricalDtype`` to ``pandas.CategoricalDtype``
Examples
--------
>>> import cudf
>>> dtype = cudf.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> dtype
CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> dtype.to_pandas()
CategoricalDtype(categories=['b', 'a'], ordered=True)
"""
if self._categories is None:
categories = None
else:
if isinstance(
self._categories, (cudf.Float32Index, cudf.Float64Index)
):
categories = self._categories.dropna().to_pandas()
else:
categories = self._categories.to_pandas()
return pd.CategoricalDtype(categories=categories, ordered=self.ordered)
def _init_categories(self, categories: Any):
if categories is None:
return categories
if len(categories) == 0 and not is_interval_dtype(categories):
dtype = "object" # type: Any
else:
dtype = None
column = cudf.core.column.as_column(categories, dtype=dtype)
if isinstance(column, cudf.core.column.CategoricalColumn):
return column.categories
else:
return column
def __eq__(self, other: Dtype) -> bool:
if isinstance(other, str):
return other == self.name
elif other is self:
return True
elif not isinstance(other, self.__class__):
return False
elif self.ordered != other.ordered:
return False
elif self._categories is None or other._categories is None:
return True
else:
return (
self._categories.dtype == other._categories.dtype
and self._categories.equals(other._categories)
)
def construct_from_string(self):
raise NotImplementedError()
def serialize(self):
header = {}
header["type-serialized"] = pickle.dumps(type(self))
header["ordered"] = self.ordered
frames = []
if self.categories is not None:
categories_header, categories_frames = self.categories.serialize()
header["categories"] = categories_header
frames.extend(categories_frames)
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
header, frames, klass = _decode_type(cls, header, frames)
ordered = header["ordered"]
categories_header = header["categories"]
categories_frames = frames
categories_type = pickle.loads(categories_header["type-serialized"])
categories = categories_type.deserialize(
categories_header, categories_frames
)
return klass(categories=categories, ordered=ordered)
def __repr__(self):
return self.to_pandas().__repr__()
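# A short sketch of the equality rules implemented in __eq__ above:
#
#     >>> import cudf
#     >>> dt = cudf.CategoricalDtype(categories=["b", "a"], ordered=True)
#     >>> dt == "category"                                        # name match
#     True
#     >>> dt == cudf.CategoricalDtype(categories=["b", "a"], ordered=False)
#     False
#     >>> dt == cudf.CategoricalDtype(ordered=True)   # unspecified categories
#     True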
class ListDtype(_BaseDtype):
"""
Type to represent list data.
Parameters
----------
element_type : object
A dtype which represents the element type of the list.
Attributes
----------
element_type
leaf_type
Methods
-------
from_arrow
to_arrow
Examples
--------
>>> import cudf
>>> list_dtype = cudf.ListDtype("int32")
>>> list_dtype
ListDtype(int32)
A nested list dtype can be created by:
>>> nested_list_dtype = cudf.ListDtype(list_dtype)
>>> nested_list_dtype
ListDtype(ListDtype(int32))
"""
_typ: pa.ListType
name: str = "list"
def __init__(self, element_type: Any) -> None:
if isinstance(element_type, ListDtype):
self._typ = pa.list_(element_type._typ)
else:
element_type = cudf.utils.dtypes.cudf_dtype_to_pa_type(
element_type
)
self._typ = pa.list_(element_type)
@cached_property
def element_type(self) -> Dtype:
"""
Returns the element type of the ``ListDtype``.
Returns
-------
Dtype
Examples
--------
>>> import cudf
>>> deep_nested_type = cudf.ListDtype(cudf.ListDtype(cudf.ListDtype("float32")))
>>> deep_nested_type
ListDtype(ListDtype(ListDtype(float32)))
>>> deep_nested_type.element_type
ListDtype(ListDtype(float32))
>>> deep_nested_type.element_type.element_type
ListDtype(float32)
>>> deep_nested_type.element_type.element_type.element_type
'float32'
""" # noqa: E501
if isinstance(self._typ.value_type, pa.ListType):
return ListDtype.from_arrow(self._typ.value_type)
elif isinstance(self._typ.value_type, pa.StructType):
return StructDtype.from_arrow(self._typ.value_type)
else:
return cudf.dtype(self._typ.value_type.to_pandas_dtype())
@cached_property
def leaf_type(self):
"""
Returns the type of the leaf values.
Examples
--------
>>> import cudf
>>> deep_nested_type = cudf.ListDtype(cudf.ListDtype(cudf.ListDtype("float32")))
>>> deep_nested_type
ListDtype(ListDtype(ListDtype(float32)))
>>> deep_nested_type.leaf_type
'float32'
""" # noqa: E501
if isinstance(self.element_type, ListDtype):
return self.element_type.leaf_type
else:
return self.element_type
@property
def type(self):
# TODO: we should change this to return something like a
# ListDtypeType, once we figure out what that should look like
return pa.array
@classmethod
def from_arrow(cls, typ):
"""
Creates a ``ListDtype`` from ``pyarrow.ListType``.
Parameters
----------
typ : pyarrow.ListType
A ``pyarrow.ListType`` that has to be converted to
``ListDtype``.
Returns
-------
obj : ``ListDtype``
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> arrow_type = pa.infer_type([[1]])
>>> arrow_type
ListType(list<item: int64>)
>>> list_dtype = cudf.ListDtype.from_arrow(arrow_type)
>>> list_dtype
ListDtype(int64)
"""
obj = object.__new__(cls)
obj._typ = typ
return obj
def to_arrow(self):
"""
Convert to a ``pyarrow.ListType``
Examples
--------
>>> import cudf
>>> list_dtype = cudf.ListDtype(cudf.ListDtype("float32"))
>>> list_dtype
ListDtype(ListDtype(float32))
>>> list_dtype.to_arrow()
ListType(list<item: list<item: float>>)
"""
return self._typ
def __eq__(self, other):
if isinstance(other, str):
return other == self.name
if not isinstance(other, ListDtype):
return False
return self._typ.equals(other._typ)
def __repr__(self):
if isinstance(self.element_type, (ListDtype, StructDtype)):
return f"{type(self).__name__}({repr(self.element_type)})"
else:
return f"{type(self).__name__}({self.element_type})"
def __hash__(self):
return hash(self._typ)
def serialize(self) -> Tuple[dict, list]:
header: Dict[str, Dtype] = {}
header["type-serialized"] = pickle.dumps(type(self))
frames = []
if isinstance(self.element_type, _BaseDtype):
header["element-type"], frames = self.element_type.serialize()
else:
header["element-type"] = getattr(
self.element_type, "name", self.element_type
)
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header: dict, frames: list):
header, frames, klass = _decode_type(cls, header, frames)
if isinstance(header["element-type"], dict):
element_type = pickle.loads(
header["element-type"]["type-serialized"]
).deserialize(header["element-type"], frames)
else:
element_type = header["element-type"]
return klass(element_type=element_type)
@cached_property
def itemsize(self):
return self.element_type.itemsize
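# A small sketch of the ListDtype behaviour above:
#
#     >>> import cudf
#     >>> dt = cudf.ListDtype("int32")
#     >>> dt == "list"     # equality against the dtype name
#     True
#     >>> dt.itemsize      # itemsize of the element type
#     4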
class StructDtype(_BaseDtype):
"""
Type to represent struct data.
Parameters
----------
fields : dict
A mapping of field names to dtypes; the dtypes can themselves
be ``StructDtype`` too.
Attributes
----------
fields
itemsize
Methods
-------
from_arrow
to_arrow
Examples
--------
>>> import cudf
>>> struct_dtype = cudf.StructDtype({"a": "int64", "b": "string"})
>>> struct_dtype
StructDtype({'a': dtype('int64'), 'b': dtype('O')})
A nested ``StructDtype`` can also be constructed in the following way:
>>> nested_struct_dtype = cudf.StructDtype({"dict_data": struct_dtype, "c": "uint8"})
>>> nested_struct_dtype
StructDtype({'dict_data': StructDtype({'a': dtype('int64'), 'b': dtype('O')}), 'c': dtype('uint8')})
""" # noqa: E501
name = "struct"
def __init__(self, fields):
pa_fields = {
k: cudf.utils.dtypes.cudf_dtype_to_pa_type(v)
for k, v in fields.items()
}
self._typ = pa.struct(pa_fields)
@property
def fields(self):
"""
Returns an ordered dict of field name and dtype key-value pairs.
Examples
--------
>>> import cudf
>>> struct_dtype = cudf.StructDtype({"a": "int64", "b": "string"})
>>> struct_dtype
StructDtype({'a': dtype('int64'), 'b': dtype('O')})
>>> struct_dtype.fields
{'a': dtype('int64'), 'b': dtype('O')}
"""
return {
field.name: cudf.utils.dtypes.cudf_dtype_from_pa_type(field.type)
for field in self._typ
}
@property
def type(self):
# TODO: we should change this to return something like a
# StructDtypeType, once we figure out what that should look like
return dict
@classmethod
def from_arrow(cls, typ):
"""
Convert a ``pyarrow.StructType`` to ``StructDtype``.
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> pa_struct_type = pa.struct({'x': pa.int32(), 'y': pa.string()})
>>> pa_struct_type
StructType(struct<x: int32, y: string>)
>>> cudf.StructDtype.from_arrow(pa_struct_type)
StructDtype({'x': dtype('int32'), 'y': dtype('O')})
"""
obj = object.__new__(cls)
obj._typ = typ
return obj
def to_arrow(self):
"""
Convert a ``StructDtype`` to a ``pyarrow.StructType``.
Examples
--------
>>> import cudf
>>> struct_type = cudf.StructDtype({"x": "int32", "y": "string"})
>>> struct_type
StructDtype({'x': dtype('int32'), 'y': dtype('O')})
>>> struct_type.to_arrow()
StructType(struct<x: int32, y: string>)
"""
return self._typ
def __eq__(self, other):
if isinstance(other, str):
return other == self.name
if not isinstance(other, StructDtype):
return False
return self._typ.equals(other._typ)
def __repr__(self):
return f"{type(self).__name__}({self.fields})"
def __hash__(self):
return hash(self._typ)
def serialize(self) -> Tuple[dict, list]:
header: Dict[str, Any] = {}
header["type-serialized"] = pickle.dumps(type(self))
frames: List[Buffer] = []
fields: Dict[str, Union[bytes, Tuple[Any, Tuple[int, int]]]] = {}
for k, dtype in self.fields.items():
if isinstance(dtype, _BaseDtype):
dtype_header, dtype_frames = dtype.serialize()
fields[k] = (
dtype_header,
(len(frames), len(frames) + len(dtype_frames)),
)
frames.extend(dtype_frames)
else:
fields[k] = pickle.dumps(dtype)
header["fields"] = fields
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header: dict, frames: list):
header, frames, klass = _decode_type(cls, header, frames)
fields = {}
for k, dtype in header["fields"].items():
if isinstance(dtype, tuple):
dtype_header, (start, stop) = dtype
fields[k] = pickle.loads(
dtype_header["type-serialized"]
).deserialize(
dtype_header,
frames[start:stop],
)
else:
fields[k] = pickle.loads(dtype)
return cls(fields)
@cached_property
def itemsize(self):
return sum(
cudf.utils.dtypes.cudf_dtype_from_pa_type(field.type).itemsize
for field in self._typ
)
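# A short sketch of the itemsize computation above (the sum of the field
# itemsizes):
#
#     >>> import cudf
#     >>> cudf.StructDtype({"a": "int64", "b": "float32"}).itemsize
#     12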
decimal_dtype_template = textwrap.dedent(
"""
Type to represent ``decimal{size}`` data.
Parameters
----------
precision : int
The total number of digits in each value of this dtype
scale : int, optional
The scale of the dtype. See Notes below.
Attributes
----------
precision
scale
itemsize
Methods
-------
to_arrow
from_arrow
Notes
-----
When the scale is positive:
- numbers with fractional parts (e.g., 0.0042) can be represented
- the scale is the total number of digits to the right of the
decimal point
When the scale is negative:
- only multiples of powers of 10 (including 10**0) can be
represented (e.g., 1729, 4200, 1000000)
- the scale represents the number of trailing zeros in the value.
For example, 42 is representable with precision=2 and scale=0.
13.0051 is representable with precision=6 and scale=4,
and *not* representable with precision<6 or scale<4.
Examples
--------
>>> import cudf
>>> decimal{size}_dtype = cudf.Decimal{size}Dtype(precision=9, scale=2)
>>> decimal{size}_dtype
Decimal{size}Dtype(precision=9, scale=2)
""" # noqa: E501
)
class DecimalDtype(_BaseDtype):
_metadata = ("precision", "scale")
def __init__(self, precision, scale=0):
self._validate(precision, scale)
self._typ = pa.decimal128(precision, scale)
@property
def str(self):
return f"{str(self.name)}({self.precision}, {self.scale})"
@property
def precision(self):
"""
The decimal precision, in number of decimal digits (an integer).
"""
return self._typ.precision
@precision.setter
def precision(self, value):
self._validate(value, self.scale)
self._typ = pa.decimal128(precision=value, scale=self.scale)
@property
def scale(self):
"""
The decimal scale (an integer).
"""
return self._typ.scale
@property
def itemsize(self):
"""
Length of one column element in bytes.
"""
return self.ITEMSIZE
@property
def type(self):
# might need to account for precision and scale here
return decimal.Decimal
def to_arrow(self):
"""
Return the equivalent ``pyarrow`` dtype.
"""
return self._typ
@classmethod
def from_arrow(cls, typ):
"""
Construct a cudf decimal dtype from a ``pyarrow`` dtype
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> pa_type = pa.decimal128(precision=9, scale=2)
Constructing a ``Decimal32Dtype``:
>>> cudf.Decimal32Dtype.from_arrow(pa_type)
Decimal32Dtype(precision=9, scale=2)
Constructing a ``Decimal64Dtype``:
>>> cudf.Decimal64Dtype.from_arrow(pa_type)
Decimal64Dtype(precision=9, scale=2)
Constructing a ``Decimal128Dtype``:
>>> cudf.Decimal128Dtype.from_arrow(pa_type)
Decimal128Dtype(precision=9, scale=2)
"""
return cls(typ.precision, typ.scale)
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"(precision={self.precision}, scale={self.scale})"
)
@classmethod
def _validate(cls, precision, scale=0):
if precision > cls.MAX_PRECISION:
raise ValueError(
f"Cannot construct a {cls.__name__}"
f" with precision > {cls.MAX_PRECISION}"
)
if abs(scale) > precision:
raise ValueError(f"scale={scale} exceeds precision={precision}")
@classmethod
def _from_decimal(cls, decimal):
"""
Create a cudf.DecimalDtype from a decimal.Decimal object
"""
metadata = decimal.as_tuple()
precision = max(len(metadata.digits), -metadata.exponent)
return cls(precision, -metadata.exponent)
def serialize(self) -> Tuple[dict, list]:
return (
{
"type-serialized": pickle.dumps(type(self)),
"precision": self.precision,
"scale": self.scale,
"frame_count": 0,
},
[],
)
@classmethod
def deserialize(cls, header: dict, frames: list):
header, frames, klass = _decode_type(
cls, header, frames, is_valid_class=issubclass
)
klass = pickle.loads(header["type-serialized"])
return klass(header["precision"], header["scale"])
def __eq__(self, other: Dtype) -> bool:
if other is self:
return True
elif not isinstance(other, self.__class__):
return False
return self.precision == other.precision and self.scale == other.scale
def __hash__(self):
return hash(self._typ)
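# Worked example of _from_decimal above: Decimal("1.23").as_tuple() has
# digits (1, 2, 3) and exponent -2, so precision = max(3, 2) = 3 and
# scale = 2.
#
#     >>> import decimal
#     >>> import cudf
#     >>> cudf.Decimal64Dtype._from_decimal(decimal.Decimal("1.23"))
#     Decimal64Dtype(precision=3, scale=2)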
@doc_apply(
decimal_dtype_template.format(
size="32",
)
)
class Decimal32Dtype(DecimalDtype):
name = "decimal32"
MAX_PRECISION = np.floor(np.log10(np.iinfo("int32").max))
ITEMSIZE = 4
@doc_apply(
decimal_dtype_template.format(
size="64",
)
)
class Decimal64Dtype(DecimalDtype):
name = "decimal64"
MAX_PRECISION = np.floor(np.log10(np.iinfo("int64").max))
ITEMSIZE = 8
@doc_apply(
decimal_dtype_template.format(
size="128",
)
)
class Decimal128Dtype(DecimalDtype):
name = "decimal128"
MAX_PRECISION = 38
ITEMSIZE = 16
class IntervalDtype(StructDtype):
"""
Parameters
----------
subtype : str, np.dtype
    The dtype of the Interval bounds.
closed : {'right', 'left', 'both', 'neither'}, default 'right'
    Whether the interval is closed on the left-side, right-side,
    both or neither.
"""
name = "interval"
def __init__(self, subtype, closed="right"):
super().__init__(fields={"left": subtype, "right": subtype})
if closed is None:
closed = "right"
if closed in ["left", "right", "neither", "both"]:
self.closed = closed
else:
raise ValueError("closed value is not valid")
@property
def subtype(self):
return self.fields["left"]
def __repr__(self) -> str:
return f"interval[{self.subtype}, {self.closed}]"
def __str__(self) -> str:
return self.__repr__()
@classmethod
def from_arrow(cls, typ):
return IntervalDtype(typ.subtype.to_pandas_dtype(), typ.closed)
def to_arrow(self):
return ArrowIntervalType(
pa.from_numpy_dtype(self.subtype), self.closed
)
@classmethod
def from_pandas(cls, pd_dtype: pd.IntervalDtype) -> "IntervalDtype":
return cls(subtype=pd_dtype.subtype, closed=pd_dtype.closed)
def to_pandas(self) -> pd.IntervalDtype:
return pd.IntervalDtype(subtype=self.subtype, closed=self.closed)
def __eq__(self, other):
if isinstance(other, str):
# This means equality isn't transitive but mimics pandas
return other == self.name
return (
type(self) == type(other)
and self.subtype == other.subtype
and self.closed == other.closed
)
def __hash__(self):
return hash((self.subtype, self.closed))
def serialize(self) -> Tuple[dict, list]:
header = {
"type-serialized": pickle.dumps(type(self)),
"fields": pickle.dumps((self.subtype, self.closed)),
"frame_count": 0,
}
return header, []
@classmethod
def deserialize(cls, header: dict, frames: list):
header, frames, klass = _decode_type(cls, header, frames)
klass = pickle.loads(header["type-serialized"])
subtype, closed = pickle.loads(header["fields"])
return klass(subtype, closed=closed)
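# A small sketch of IntervalDtype: both bounds share one subtype and the
# underlying struct fields are "left" and "right".
#
#     >>> import cudf
#     >>> dt = cudf.IntervalDtype("int64", closed="left")
#     >>> dt
#     interval[int64, left]
#     >>> dt.fields
#     {'left': dtype('int64'), 'right': dtype('int64')}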
def is_categorical_dtype(obj):
"""Check whether an array-like or dtype is of the Categorical dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of a categorical dtype.
"""
if obj is None:
return False
if isinstance(
obj,
(
pd_CategoricalDtype,
cudf.CategoricalDtype,
cudf.core.index.CategoricalIndex,
cudf.core.column.CategoricalColumn,
pd.Categorical,
pd.CategoricalIndex,
),
):
return True
# Note that we cannot directly use `obj in (...)` because that triggers
# equality as well as identity checks and pandas extension dtypes won't
# allow converting that equality check to a boolean; `__nonzero__` is
# disabled because they treat dtypes as "array-like".
if any(
obj is t
for t in (
cudf.CategoricalDtype,
pd_CategoricalDtype,
pd_CategoricalDtypeType,
)
):
return True
if isinstance(obj, (np.ndarray, np.dtype)):
return False
if isinstance(obj, str) and obj == "category":
return True
if isinstance(obj, cudf.core.index.BaseIndex):
return obj._is_categorical()
if isinstance(
obj,
(
cudf.Series,
cudf.core.column.ColumnBase,
pd.Index,
pd.Series,
),
):
return is_categorical_dtype(obj.dtype)
if hasattr(obj, "type"):
if obj.type is pd_CategoricalDtypeType:
return True
# TODO: A lot of the above checks are probably redundant and should be
# farmed out to this function here instead.
return pd_types.is_categorical_dtype(obj)
def is_list_dtype(obj):
"""Check whether an array-like or dtype is of the list dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of the list dtype.
"""
return (
type(obj) is cudf.core.dtypes.ListDtype
or obj is cudf.core.dtypes.ListDtype
or type(obj) is cudf.core.column.ListColumn
or obj is cudf.core.column.ListColumn
or (isinstance(obj, str) and obj == cudf.core.dtypes.ListDtype.name)
or (hasattr(obj, "dtype") and is_list_dtype(obj.dtype))
)
def is_struct_dtype(obj):
"""Check whether an array-like or dtype is of the struct dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of the struct dtype.
"""
# TODO: This behavior is currently inconsistent for interval types. The
# actual class IntervalDtype will return False, but instances (e.g.
# IntervalDtype(int)) will return True. For now this is not being changed
# since the interval dtype is being modified as part of the array refactor,
# but this behavior should be made consistent afterwards.
return (
isinstance(obj, cudf.core.dtypes.StructDtype)
or obj is cudf.core.dtypes.StructDtype
or (isinstance(obj, str) and obj == cudf.core.dtypes.StructDtype.name)
or (hasattr(obj, "dtype") and is_struct_dtype(obj.dtype))
)
def is_decimal_dtype(obj):
"""Check whether an array-like or dtype is of the decimal dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of the decimal dtype.
"""
return (
is_decimal32_dtype(obj)
or is_decimal64_dtype(obj)
or is_decimal128_dtype(obj)
)
def is_interval_dtype(obj):
"""Check whether an array-like or dtype is of the interval dtype.
Parameters
----------
obj : array-like or dtype
The array-like or dtype to check.
Returns
-------
bool
Whether or not the array-like or dtype is of the interval dtype.
"""
# TODO: Should there be any branch in this function that calls
# pd.api.types.is_interval_dtype?
return (
isinstance(
obj,
(
cudf.core.dtypes.IntervalDtype,
pd.IntervalDtype,
),
)
or obj is cudf.core.dtypes.IntervalDtype
or (isinstance(obj, cudf.core.index.BaseIndex) and obj._is_interval())
or (
isinstance(obj, str) and obj == cudf.core.dtypes.IntervalDtype.name
)
or (hasattr(obj, "dtype") and is_interval_dtype(obj.dtype))
)
def is_decimal32_dtype(obj):
return (
type(obj) is cudf.core.dtypes.Decimal32Dtype
or obj is cudf.core.dtypes.Decimal32Dtype
or (
isinstance(obj, str)
and obj == cudf.core.dtypes.Decimal32Dtype.name
)
or (hasattr(obj, "dtype") and is_decimal32_dtype(obj.dtype))
)
def is_decimal64_dtype(obj):
return (
type(obj) is cudf.core.dtypes.Decimal64Dtype
or obj is cudf.core.dtypes.Decimal64Dtype
or (
isinstance(obj, str)
and obj == cudf.core.dtypes.Decimal64Dtype.name
)
or (hasattr(obj, "dtype") and is_decimal64_dtype(obj.dtype))
)
def is_decimal128_dtype(obj):
return (
type(obj) is cudf.core.dtypes.Decimal128Dtype
or obj is cudf.core.dtypes.Decimal128Dtype
or (
isinstance(obj, str)
and obj == cudf.core.dtypes.Decimal128Dtype.name
)
or (hasattr(obj, "dtype") and is_decimal128_dtype(obj.dtype))
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/row_function.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
import math
import numpy as np
from numba import cuda
from numba.np import numpy_support
from cudf.core.udf.api import Masked, pack_return
from cudf.core.udf.masked_typing import MaskedType
from cudf.core.udf.strings_typing import string_view
from cudf.core.udf.templates import (
masked_input_initializer_template,
row_initializer_template,
row_kernel_template,
unmasked_input_initializer_template,
)
from cudf.core.udf.utils import (
Row,
_all_dtypes_from_frame,
_construct_signature,
_get_extensionty_size,
_get_kernel,
_get_udf_return_type,
_mask_get,
_supported_cols_from_frame,
_supported_dtypes_from_frame,
)
def _get_frame_row_type(dtype):
"""
Get the Numba type of a row in a frame. Models each column and its mask as
a MaskedType and models the row as a dictionary like data structure
containing these MaskedTypes. Large parts of this function are copied with
comments from the Numba internals and slightly modified to account for
validity bools to be present in the final struct. See
numba.np.numpy_support.from_struct_dtype for details.
"""
# Create the numpy structured type corresponding to the numpy dtype.
fields = []
offset = 0
sizes = [
_get_extensionty_size(string_view)
if val[0] == np.dtype("O")
else val[0].itemsize
for val in dtype.fields.values()
]
for i, (name, info) in enumerate(dtype.fields.items()):
# *info* consists of the element dtype, its offset from the beginning
# of the record, and an optional "title" containing metadata.
# We ignore the offset in info because its value assumes no masking;
# instead, we compute the correct offset based on the masked type.
elemdtype = info[0]
title = info[2] if len(info) == 3 else None
ty = (
# columns of dtype string start life as string_view
string_view
if elemdtype == np.dtype("O")
else numpy_support.from_dtype(elemdtype)
)
infos = {
"type": MaskedType(ty),
"offset": offset,
"title": title,
}
fields.append((name, infos))
# increment offset by itemsize plus one byte for validity
itemsize = (
_get_extensionty_size(string_view)
if elemdtype == np.dtype("O")
else elemdtype.itemsize
)
offset += itemsize + 1
# Align the next member of the struct to be a multiple of the
# memory access size, per PTX ISA 7.4/5.4.5
if i < len(sizes) - 1:
next_itemsize = sizes[i + 1]
offset = int(math.ceil(offset / next_itemsize) * next_itemsize)
# Numba requires that structures are aligned for the CUDA target
_is_aligned_struct = True
return Row(fields, offset, _is_aligned_struct)
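# Worked example of the offset arithmetic above (illustrative): for a frame
# with two int64 columns, each member occupies itemsize + 1 validity byte
# = 9 bytes. The first member starts at offset 0 and advances to 9, which is
# then aligned up to the next member's itemsize (8), so the second member
# starts at offset 16; the final record size is 16 + 9 = 25 bytes (the last
# member is not padded).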
def _row_kernel_string_from_template(frame, row_type, args):
"""
Function to write numba kernels for `DataFrame.apply` as a string.
Workaround until numba supports functions that use `*args`
`DataFrame.apply` expects functions of a dict-like row as well as
possibly one or more scalar arguments
def f(row, c, k):
return (row['x'] + c) / k
Both the number of input columns as well as their nullability and any
scalar arguments may vary, so the kernels vary significantly. See
templates.py for the full row kernel template and more details.
"""
# Create argument list for kernel
frame = _supported_cols_from_frame(frame)
input_columns = ", ".join([f"input_col_{i}" for i in range(len(frame))])
input_offsets = ", ".join([f"offset_{i}" for i in range(len(frame))])
extra_args = ", ".join([f"extra_arg_{i}" for i in range(len(args))])
# Generate the initializers for each device function argument
initializers = []
row_initializers = []
for i, (colname, col) in enumerate(frame.items()):
idx = str(i)
template = (
masked_input_initializer_template
if col.mask is not None
else unmasked_input_initializer_template
)
initializers.append(template.format(idx=idx))
row_initializers.append(
row_initializer_template.format(idx=idx, name=colname)
)
return row_kernel_template.format(
input_columns=input_columns,
input_offsets=input_offsets,
extra_args=extra_args,
masked_input_initializers="\n".join(initializers),
row_initializers="\n".join(row_initializers),
numba_rectype=row_type,
)
def _get_row_kernel(frame, func, args):
row_type = _get_frame_row_type(
np.dtype(list(_all_dtypes_from_frame(frame).items()))
)
scalar_return_type = _get_udf_return_type(row_type, func, args)
# this is the signature for the final full kernel compilation
sig = _construct_signature(frame, scalar_return_type, args)
# this row type is used within the kernel to pack up the column and
# mask data into the dict like data structure the user udf expects
np_field_types = np.dtype(
list(_supported_dtypes_from_frame(frame).items())
)
row_type = _get_frame_row_type(np_field_types)
# Dict of 'local' variables into which `_kernel` is defined
global_exec_context = {
"cuda": cuda,
"Masked": Masked,
"_mask_get": _mask_get,
"pack_return": pack_return,
"row_type": row_type,
}
kernel_string = _row_kernel_string_from_template(frame, row_type, args)
kernel = _get_kernel(kernel_string, global_exec_context, sig, func)
return kernel, scalar_return_type
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/_ops.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
import math
import operator
arith_ops = [
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.mod,
operator.pow,
operator.iadd,
operator.isub,
operator.imul,
operator.itruediv,
operator.ifloordiv,
operator.ipow,
operator.imod,
]
bitwise_ops = [operator.and_, operator.or_, operator.xor]
unary_ops = [
math.acos,
math.acosh,
math.asin,
math.asinh,
math.atan,
math.atanh,
math.ceil,
math.cos,
math.degrees,
math.erf,
math.erfc,
math.exp,
math.expm1,
math.fabs,
math.floor,
math.gamma,
math.lgamma,
math.log,
math.log10,
math.log1p,
math.log2,
math.radians,
math.sin,
math.sinh,
math.sqrt,
math.tan,
math.tanh,
operator.pos,
operator.neg,
operator.not_,
operator.invert,
]
comparison_ops = [
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
]
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/scalar_function.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from numba import cuda
from numba.np import numpy_support
from cudf.core.udf.api import Masked, pack_return
from cudf.core.udf.masked_typing import MaskedType
from cudf.core.udf.strings_typing import string_view
from cudf.core.udf.templates import (
masked_input_initializer_template,
scalar_kernel_template,
unmasked_input_initializer_template,
)
from cudf.core.udf.utils import (
_construct_signature,
_get_kernel,
_get_udf_return_type,
_mask_get,
)
def _scalar_kernel_string_from_template(sr, args):
"""
Function to write numba kernels for `Series.apply` as a string.
Workaround until numba supports functions that use `*args`
`Series.apply` expects functions of a single variable and possibly
one or more constants, such as:
def f(x, c, k):
return (x + c) / k
where the `x` are meant to be the values of the series. Since there
can be only one column, the only thing that varies in the kinds of
kernels that we want is the number of extra_args. See templates.py
for the full kernel template.
"""
extra_args = ", ".join([f"extra_arg_{i}" for i in range(len(args))])
masked_initializer = (
masked_input_initializer_template
if sr._column.mask
else unmasked_input_initializer_template
).format(idx=0)
return scalar_kernel_template.format(
extra_args=extra_args, masked_initializer=masked_initializer
)
def _get_scalar_kernel(sr, func, args):
sr_type = MaskedType(
string_view if sr.dtype == "O" else numpy_support.from_dtype(sr.dtype)
)
scalar_return_type = _get_udf_return_type(sr_type, func, args)
sig = _construct_signature(sr, scalar_return_type, args=args)
f_ = cuda.jit(device=True)(func)
global_exec_context = {
"f_": f_,
"cuda": cuda,
"Masked": Masked,
"_mask_get": _mask_get,
"pack_return": pack_return,
}
kernel_string = _scalar_kernel_string_from_template(sr, args=args)
kernel = _get_kernel(kernel_string, global_exec_context, sig, func)
return kernel, scalar_return_type
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/templates.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
unmasked_input_initializer_template = """\
d_{idx} = input_col_{idx}
masked_{idx} = Masked(d_{idx}[i], True)
"""
masked_input_initializer_template = """\
d_{idx}, m_{idx} = input_col_{idx}
masked_{idx} = Masked(d_{idx}[i], _mask_get(m_{idx}, i + offset_{idx}))
"""
row_initializer_template = """\
row["{name}"] = masked_{idx}
"""
group_initializer_template = """\
arr_{idx} = input_col_{idx}[offset[block_id]:offset[block_id+1]]
dataframe_group["{name}"] = Group(arr_{idx}, size, arr_index)
"""
row_kernel_template = """\
def _kernel(retval, size, {input_columns}, {input_offsets}, {extra_args}):
i = cuda.grid(1)
ret_data_arr, ret_mask_arr = retval
if i < size:
# Create a structured array with the desired fields
rows = cuda.local.array(1, dtype=row_type)
# one element of that array
row = rows[0]
{masked_input_initializers}
{row_initializers}
# pass the assembled row into the udf
ret = f_(row, {extra_args})
# pack up the return values and set them
ret_masked = pack_return(ret)
ret_data_arr[i] = ret_masked.value
ret_mask_arr[i] = ret_masked.valid
"""
scalar_kernel_template = """
def _kernel(retval, size, input_col_0, offset_0, {extra_args}):
i = cuda.grid(1)
ret_data_arr, ret_mask_arr = retval
if i < size:
{masked_initializer}
ret = f_(masked_0, {extra_args})
ret_masked = pack_return(ret)
ret_data_arr[i] = ret_masked.value
ret_mask_arr[i] = ret_masked.valid
"""
groupby_apply_kernel_template = """
def _kernel(offset, out, index, {input_columns}, {extra_args}):
tid = cuda.threadIdx.x
block_id = cuda.blockIdx.x
tb_size = cuda.blockDim.x
recarray = cuda.local.array(1, dtype=dataframe_group_type)
dataframe_group = recarray[0]
if block_id < (len(offset) - 1):
size = offset[block_id+1] - offset[block_id]
arr_index = index[offset[block_id]:offset[block_id+1]]
{group_initializers}
result = f_(dataframe_group, {extra_args})
if cuda.threadIdx.x == 0:
out[block_id] = result
"""
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/masked_lowering.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
import operator
from llvmlite import ir
from numba.core import cgutils
from numba.core.typing import signature as nb_signature
from numba.cuda.cudaimpl import (
lower as cuda_lower,
registry as cuda_lowering_registry,
)
from numba.extending import lower_builtin, types
from cudf.core.udf import api
from cudf.core.udf._ops import (
arith_ops,
bitwise_ops,
comparison_ops,
unary_ops,
)
from cudf.core.udf.masked_typing import (
MaskedType,
NAType,
_supported_masked_types,
)
@cuda_lowering_registry.lower_constant(NAType)
def constant_na(context, builder, ty, pyval):
# This handles None, etc.
return context.get_dummy_value()
# In the typing phase, we declared that a `MaskedType` can be
# added to another `MaskedType` and specified what kind of a
# `MaskedType` would result. Now we have to actually fill in
# the implementation details of how to do that. This is where
# we can involve both validities in constructing the answer
def make_arithmetic_op(op):
"""
Make closures that implement arithmetic operations. See
register_arithmetic_op for details.
"""
def masked_scalar_op_impl(context, builder, sig, args):
"""
Implement `MaskedType` <op> `MaskedType`
"""
# MaskedType(...), MaskedType(...)
masked_type_1, masked_type_2 = sig.args
# MaskedType(...)
masked_return_type = sig.return_type
# Let there be two actual LLVM structs backing the two inputs
# https://mapping-high-level-constructs-to-llvm-ir.readthedocs.io/en/latest/basic-constructs/structures.html
m1 = cgutils.create_struct_proxy(masked_type_1)(
context, builder, value=args[0]
)
m2 = cgutils.create_struct_proxy(masked_type_2)(
context, builder, value=args[1]
)
# we will return an output struct
result = cgutils.create_struct_proxy(masked_return_type)(
context, builder
)
# compute output validity
valid = builder.and_(m1.valid, m2.valid)
result.valid = valid
with builder.if_then(valid):
# Let numba handle generating the extra IR needed to perform
# operations on mixed types, by compiling the final core op between
# the two primitive values as a separate function and calling it
result.value = context.compile_internal(
builder,
lambda x, y: op(x, y),
nb_signature(
masked_return_type.value_type,
masked_type_1.value_type,
masked_type_2.value_type,
),
(m1.value, m2.value),
)
return result._getvalue()
return masked_scalar_op_impl
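# A pure-Python analogue of the semantics lowered above (an illustrative
# sketch, not the generated IR): the result is valid only when both inputs
# are valid, and the payload is computed only in that case.
def _masked_binop_sketch(op, lhs_value, lhs_valid, rhs_value, rhs_valid):
    # combine validities first, mirroring builder.and_(m1.valid, m2.valid)
    valid = lhs_valid and rhs_valid
    # compute the payload only under the validity guard; it is undefined
    # (here: None) when the result is invalid
    value = op(lhs_value, rhs_value) if valid else None
    return value, valid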
def make_unary_op(op):
"""
Make closures that implement unary operations. See register_unary_op for
details.
"""
def masked_scalar_unary_op_impl(context, builder, sig, args):
"""
Implement <op> `MaskedType`
"""
# MaskedType(...)
masked_type_1 = sig.args[0]
# MaskedType(...)
masked_return_type = sig.return_type
m1 = cgutils.create_struct_proxy(masked_type_1)(
context, builder, value=args[0]
)
# we will return an output struct
result = cgutils.create_struct_proxy(masked_return_type)(
context, builder
)
# compute output validity
result.valid = m1.valid
with builder.if_then(m1.valid):
# Let numba handle generating the extra IR needed to perform
# operations on mixed types, by compiling the final core op between
# the two primitive values as a separate function and calling it
result.value = context.compile_internal(
builder,
lambda x: op(x),
nb_signature(
masked_return_type.value_type,
masked_type_1.value_type,
),
(m1.value,),
)
return result._getvalue()
return masked_scalar_unary_op_impl
def register_arithmetic_op(op):
"""
Register a lowering implementation for the
arithmetic op `op`.
Because the lowering implementations compile the final
op separately using a lambda and compile_internal, `op`
needs to be tied to each lowering implementation using
a closure.
This function makes and lowers a closure for one op.
"""
to_lower_op = make_arithmetic_op(op)
cuda_lower(op, MaskedType, MaskedType)(to_lower_op)
def register_unary_op(op):
"""
Register a lowering implementation for the
unary op `op`.
Because the lowering implementations compile the final
op separately using a lambda and compile_internal, `op`
needs to be tied to each lowering implementation using
a closure.
This function makes and lowers a closure for one op.
"""
to_lower_op = make_unary_op(op)
cuda_lower(op, MaskedType)(to_lower_op)
def masked_scalar_null_op_impl(context, builder, sig, args):
"""
Implement `MaskedType` <op> `NAType`
or `NAType` <op> `MaskedType`
The answer to this is known up front so no actual operation
needs to take place
"""
return_type = sig.return_type # MaskedType(...)
result = cgutils.create_struct_proxy(MaskedType(return_type.value_type))(
context, builder
)
# Invalidate the struct and leave `value` uninitialized
result.valid = context.get_constant(types.boolean, 0)
return result._getvalue()
def make_const_op(op):
def masked_scalar_const_op_impl(context, builder, sig, args):
return_type = sig.return_type
result = cgutils.create_struct_proxy(return_type)(context, builder)
result.valid = context.get_constant(types.boolean, 0)
if isinstance(sig.args[0], MaskedType):
masked_type, const_type = sig.args
masked_value, const_value = args
indata = cgutils.create_struct_proxy(masked_type)(
context, builder, value=masked_value
)
nb_sig = nb_signature(
return_type.value_type, masked_type.value_type, const_type
)
compile_args = (indata.value, const_value)
else:
const_type, masked_type = sig.args
const_value, masked_value = args
indata = cgutils.create_struct_proxy(masked_type)(
context, builder, value=masked_value
)
nb_sig = nb_signature(
return_type.value_type, const_type, masked_type.value_type
)
compile_args = (const_value, indata.value)
with builder.if_then(indata.valid):
result.value = context.compile_internal(
builder, lambda x, y: op(x, y), nb_sig, compile_args
)
result.valid = context.get_constant(types.boolean, 1)
return result._getvalue()
return masked_scalar_const_op_impl
def register_const_op(op):
to_lower_op = make_const_op(op)
cuda_lower(op, MaskedType, types.Number)(to_lower_op)
cuda_lower(op, types.Number, MaskedType)(to_lower_op)
cuda_lower(op, MaskedType, types.Boolean)(to_lower_op)
cuda_lower(op, types.Boolean, MaskedType)(to_lower_op)
cuda_lower(op, MaskedType, types.NPDatetime)(to_lower_op)
cuda_lower(op, types.NPDatetime, MaskedType)(to_lower_op)
cuda_lower(op, MaskedType, types.NPTimedelta)(to_lower_op)
cuda_lower(op, types.NPTimedelta, MaskedType)(to_lower_op)
# register all lowering at init
for binary_op in arith_ops + bitwise_ops + comparison_ops:
register_arithmetic_op(binary_op)
register_const_op(binary_op)
# null op impl can be shared between all ops
cuda_lower(binary_op, MaskedType, NAType)(masked_scalar_null_op_impl)
cuda_lower(binary_op, NAType, MaskedType)(masked_scalar_null_op_impl)
# register all lowering at init
for unary_op in unary_ops:
register_unary_op(unary_op)
register_unary_op(abs)
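# Note (added sketch): the registrations above give UDF-level null semantics
# roughly as follows, where x and y stand for masked row elements. This is an
# illustration of the intent, not additional registered behavior.
#
#   x + y        -> valid only if both x and y are valid
#   -x, abs(x)   -> validity follows x
#   x + cudf.NA  -> always invalid; no device op is executed
#   x + 5        -> the constant operand is treated as always valid, so the
#                   result's validity follows x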
@cuda_lower(operator.is_, MaskedType, NAType)
@cuda_lower(operator.is_, NAType, MaskedType)
def masked_scalar_is_null_impl(context, builder, sig, args):
"""
Implement `MaskedType` is `NA`
"""
if isinstance(sig.args[1], NAType):
masked_type, na = sig.args
value = args[0]
else:
na, masked_type = sig.args
value = args[1]
indata = cgutils.create_struct_proxy(masked_type)(
context, builder, value=value
)
result = cgutils.alloca_once(builder, ir.IntType(1))
with builder.if_else(indata.valid) as (then, otherwise):
with then:
builder.store(context.get_constant(types.boolean, 0), result)
with otherwise:
builder.store(context.get_constant(types.boolean, 1), result)
return builder.load(result)
# Main kernel always calls `pack_return` on whatever the user-defined
# function returned. This returns the same data if it's already a `Masked`,
# else packs it up into a new one that is valid from the get-go.
@cuda_lower(api.pack_return, MaskedType)
def pack_return_masked_impl(context, builder, sig, args):
return args[0]
@cuda_lower(api.pack_return, types.Boolean)
@cuda_lower(api.pack_return, types.Number)
@cuda_lower(api.pack_return, types.NPDatetime)
@cuda_lower(api.pack_return, types.NPTimedelta)
def pack_return_scalar_impl(context, builder, sig, args):
outdata = cgutils.create_struct_proxy(sig.return_type)(context, builder)
outdata.value = args[0]
outdata.valid = context.get_constant(types.boolean, 1)
return outdata._getvalue()
@cuda_lower(operator.truth, MaskedType)
@cuda_lower(bool, MaskedType)
def masked_scalar_bool_impl(context, builder, sig, args):
indata = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
result = cgutils.alloca_once(builder, ir.IntType(1))
with builder.if_else(indata.valid) as (then, otherwise):
with then:
builder.store(
context.cast(
builder,
indata.value,
sig.args[0].value_type,
types.boolean,
),
result,
)
with otherwise:
builder.store(context.get_constant(types.boolean, 0), result)
return builder.load(result)
@cuda_lower(float, MaskedType)
@cuda_lower(int, MaskedType)
def masked_scalar_cast_impl(context, builder, sig, args):
input = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
result = cgutils.create_struct_proxy(sig.return_type)(context, builder)
casted = context.cast(
builder,
input.value,
sig.args[0].value_type,
sig.return_type.value_type,
)
result.value = casted
result.valid = input.valid
return result._getvalue()
# To handle the unification, we need to support casting from any type to a
# masked type. The cast implementation takes the value passed in and returns
# a masked type struct wrapping that value.
@cuda_lowering_registry.lower_cast(types.Any, MaskedType)
def cast_primitive_to_masked(context, builder, fromty, toty, val):
casted = context.cast(builder, val, fromty, toty.value_type)
ext = cgutils.create_struct_proxy(toty)(context, builder)
ext.value = casted
ext.valid = context.get_constant(types.boolean, 1)
return ext._getvalue()
@cuda_lowering_registry.lower_cast(NAType, MaskedType)
def cast_na_to_masked(context, builder, fromty, toty, val):
result = cgutils.create_struct_proxy(toty)(context, builder)
result.valid = context.get_constant(types.boolean, 0)
return result._getvalue()
@cuda_lowering_registry.lower_cast(MaskedType, MaskedType)
def cast_masked_to_masked(context, builder, fromty, toty, val):
"""
When numba encounters an op that expects a certain type and
the input to the op is not of the expected type it will try
to cast the input to the appropriate type. But, in our case
the input may be a MaskedType, which numba doesn't natively
know how to cast to a different MaskedType with a different
`value_type`. This implements and registers that cast.
"""
    # We extract the incoming value, cast it to the target value type, and
    # repackage it alongside the original validity.
operand = cgutils.create_struct_proxy(fromty)(context, builder, value=val)
casted = context.cast(
builder, operand.value, fromty.value_type, toty.value_type
)
ext = cgutils.create_struct_proxy(toty)(context, builder)
ext.value = casted
ext.valid = operand.valid
return ext._getvalue()
# Masked constructor for use in a kernel for testing
def masked_constructor(context, builder, sig, args):
ty = sig.return_type
value, valid = args
masked = cgutils.create_struct_proxy(ty)(context, builder)
masked.value = value
masked.valid = valid
return masked._getvalue()
for ty in _supported_masked_types:
lower_builtin(api.Masked, ty, types.boolean)(masked_constructor)
# Allows us to make an instance of MaskedType a global variable
# and properly use it inside functions we will later compile
@cuda_lowering_registry.lower_constant(MaskedType)
def lower_constant_masked(context, builder, ty, val):
masked = cgutils.create_struct_proxy(ty)(context, builder)
masked.value = context.get_constant(ty.value_type, val.value)
masked.valid = context.get_constant(types.boolean, val.valid)
return masked._getvalue()
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/groupby_typing.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from typing import Any, Dict
import numba
from numba import cuda, types
from numba.core.extending import (
make_attribute_wrapper,
models,
register_model,
type_callable,
typeof_impl,
)
from numba.core.typing import signature as nb_signature
from numba.core.typing.templates import AbstractTemplate, AttributeTemplate
from numba.cuda.cudadecl import registry as cuda_registry
from numba.np import numpy_support
index_default_type = types.int64
group_size_type = types.int64
SUPPORTED_GROUPBY_NUMBA_TYPES = [
types.int32,
types.int64,
types.float32,
types.float64,
]
SUPPORTED_GROUPBY_NUMPY_TYPES = [
numpy_support.as_dtype(dt) for dt in SUPPORTED_GROUPBY_NUMBA_TYPES
]
class Group:
"""
A piece of python code whose purpose is to be replaced
during compilation. After being registered to GroupType,
serves as a handle for instantiating GroupType objects
in python code and accessing their attributes
"""
pass
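# Illustrative only: the user-facing entry point that exercises these types is
# GroupBy.apply with the JIT engine. The data below is hypothetical and the
# engine="jit" keyword is assumed from cudf's GroupBy.apply API.
#
#   import cudf
#   df = cudf.DataFrame({"k": [0, 0, 1], "x": [1.0, 2.0, 3.0]})
#   out = df.groupby("k").apply(
#       lambda group: group["x"].max() - group["x"].mean(), engine="jit"
#   )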
class GroupType(numba.types.Type):
"""
Numba extension type carrying metadata associated with a single
GroupBy group. This metadata ultimately is passed to the CUDA
__device__ function which actually performs the work.
"""
def __init__(self, group_scalar_type, index_type=index_default_type):
self.group_scalar_type = group_scalar_type
self.index_type = index_type
self.group_data_type = types.CPointer(group_scalar_type)
self.group_size_type = group_size_type
self.group_index_type = types.CPointer(index_type)
super().__init__(
name=f"Group({self.group_scalar_type}, {self.index_type})"
)
@typeof_impl.register(Group)
def typeof_group(val, c):
"""
Tie Group and GroupType together such that when Numba
sees usage of Group in raw python code, it knows to
treat those usages as uses of GroupType
"""
return GroupType(
numba.np.numpy_support.from_dtype(val.dtype),
numba.np.numpy_support.from_dtype(val.index_dtype),
)
# The typing of the python "function" Group.__init__
# as it appears in python code
@type_callable(Group)
def type_group(context):
def typer(group_data, size, index):
if (
isinstance(group_data, types.Array)
and isinstance(size, types.Integer)
and isinstance(index, types.Array)
):
return GroupType(group_data.dtype, index.dtype)
return typer
@register_model(GroupType)
class GroupModel(models.StructModel):
"""
Model backing GroupType instances. See the link below for details.
https://github.com/numba/numba/blob/main/numba/core/datamodel/models.py
"""
def __init__(self, dmm, fe_type):
members = [
("group_data", types.CPointer(fe_type.group_scalar_type)),
("size", group_size_type),
("index", types.CPointer(fe_type.index_type)),
]
super().__init__(dmm, fe_type, members)
call_cuda_functions: Dict[Any, Any] = {}
def _register_cuda_binary_reduction_caller(funcname, lty, rty, retty):
cuda_func = cuda.declare_device(
f"Block{funcname}_{lty}_{rty}",
retty(types.CPointer(lty), types.CPointer(rty), group_size_type),
)
def caller(lhs, rhs, size):
return cuda_func(lhs, rhs, size)
call_cuda_functions.setdefault(funcname.lower(), {})
type_key = retty, lty, rty
call_cuda_functions[funcname.lower()][type_key] = caller
def _register_cuda_unary_reduction_caller(funcname, inputty, retty):
cuda_func = cuda.declare_device(
f"Block{funcname}_{inputty}",
retty(types.CPointer(inputty), group_size_type),
)
def caller(data, size):
return cuda_func(data, size)
call_cuda_functions.setdefault(funcname.lower(), {})
type_key = (retty, inputty)
call_cuda_functions[funcname.lower()][type_key] = caller
def _register_cuda_idx_reduction_caller(funcname, inputty):
cuda_func = cuda.declare_device(
f"Block{funcname}_{inputty}",
types.int64(
types.CPointer(inputty),
types.CPointer(index_default_type),
group_size_type,
),
)
def caller(data, index, size):
return cuda_func(data, index, size)
# only support default index type right now
type_key = (index_default_type, inputty)
call_cuda_functions.setdefault(funcname.lower(), {})
call_cuda_functions[funcname.lower()][type_key] = caller
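# Sketch of the registry shape these helpers build once the registrations at
# the bottom of this module have run (illustrative subset; keys are numba
# types and values are the small `caller` closures defined above):
#
#   call_cuda_functions == {
#       "max":    {(int64, int64): caller, (float64, float64): caller, ...},
#       "sum":    {(int64, int32): caller, (float64, float64): caller, ...},
#       "idxmax": {(int64, int64): caller, (int64, float64): caller, ...},
#       "corr":   {(float64, int32, int32): caller,
#                  (float64, int64, int64): caller},
#   }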
def _make_unary_attr(funcname):
class GroupUnaryReductionAttrTyping(AbstractTemplate):
key = f"GroupType.{funcname}"
def generic(self, args, kws):
for retty, inputty in call_cuda_functions[funcname.lower()].keys():
if self.this.group_scalar_type == inputty:
return nb_signature(retty, recvr=self.this)
return None
def _attr(self, mod):
return types.BoundFunction(
GroupUnaryReductionAttrTyping,
GroupType(mod.group_scalar_type, mod.index_type),
)
return _attr
def _create_reduction_attr(name, retty=None):
class Attr(AbstractTemplate):
key = name
def generic(self, args, kws):
return nb_signature(
self.this.group_scalar_type if not retty else retty,
recvr=self.this,
)
Attr.generic = generic
def _attr(self, mod):
return types.BoundFunction(
Attr, GroupType(mod.group_scalar_type, mod.index_type)
)
return _attr
class GroupIdxMax(AbstractTemplate):
key = "GroupType.idxmax"
def generic(self, args, kws):
return nb_signature(self.this.index_type, recvr=self.this)
class GroupIdxMin(AbstractTemplate):
key = "GroupType.idxmin"
def generic(self, args, kws):
return nb_signature(self.this.index_type, recvr=self.this)
class GroupCorr(AbstractTemplate):
key = "GroupType.corr"
def generic(self, args, kws):
return nb_signature(types.float64, args[0], recvr=self.this)
@cuda_registry.register_attr
class GroupAttr(AttributeTemplate):
key = GroupType
resolve_max = _make_unary_attr("max")
resolve_min = _make_unary_attr("min")
resolve_sum = _make_unary_attr("sum")
resolve_mean = _make_unary_attr("mean")
resolve_var = _make_unary_attr("var")
resolve_std = _make_unary_attr("std")
resolve_size = _create_reduction_attr(
"GroupType.size", retty=group_size_type
)
resolve_count = _create_reduction_attr(
"GroupType.count", retty=types.int64
)
def resolve_idxmax(self, mod):
return types.BoundFunction(
GroupIdxMax, GroupType(mod.group_scalar_type, mod.index_type)
)
def resolve_idxmin(self, mod):
return types.BoundFunction(
GroupIdxMin, GroupType(mod.group_scalar_type, mod.index_type)
)
def resolve_corr(self, mod):
return types.BoundFunction(
GroupCorr, GroupType(mod.group_scalar_type, mod.index_type)
)
for ty in SUPPORTED_GROUPBY_NUMBA_TYPES:
_register_cuda_unary_reduction_caller("Max", ty, ty)
_register_cuda_unary_reduction_caller("Min", ty, ty)
_register_cuda_idx_reduction_caller("IdxMax", ty)
_register_cuda_idx_reduction_caller("IdxMin", ty)
if ty in types.integer_domain:
_register_cuda_binary_reduction_caller("Corr", ty, ty, types.float64)
_register_cuda_unary_reduction_caller("Sum", types.int32, types.int64)
_register_cuda_unary_reduction_caller("Sum", types.int64, types.int64)
_register_cuda_unary_reduction_caller("Sum", types.float32, types.float32)
_register_cuda_unary_reduction_caller("Sum", types.float64, types.float64)
_register_cuda_unary_reduction_caller("Mean", types.int32, types.float64)
_register_cuda_unary_reduction_caller("Mean", types.int64, types.float64)
_register_cuda_unary_reduction_caller("Mean", types.float32, types.float32)
_register_cuda_unary_reduction_caller("Mean", types.float64, types.float64)
_register_cuda_unary_reduction_caller("Std", types.int32, types.float64)
_register_cuda_unary_reduction_caller("Std", types.int64, types.float64)
_register_cuda_unary_reduction_caller("Std", types.float32, types.float32)
_register_cuda_unary_reduction_caller("Std", types.float64, types.float64)
_register_cuda_unary_reduction_caller("Var", types.int32, types.float64)
_register_cuda_unary_reduction_caller("Var", types.int64, types.float64)
_register_cuda_unary_reduction_caller("Var", types.float32, types.float32)
_register_cuda_unary_reduction_caller("Var", types.float64, types.float64)
for attr in ("group_data", "index", "size"):
make_attribute_wrapper(GroupType, attr, attr)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/api.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION.
class Masked:
"""
    Most of the time, MaskedType as defined in masked_typing.py,
    combined with the ops defined to operate on it, is enough to
    fulfill the obligations of DataFrame.apply. However, sometimes
    we need to refer to an instance of a masked scalar outside the
    context of a UDF, such as a global variable. To get numba to
    identify that variable as being of type MaskedType and treat it
    as such, we need an actual python class we can tie to MaskedType.
    This is that class.
"""
def __init__(self, value, valid):
self.value = value
self.valid = valid
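# A minimal usage sketch (hypothetical data): a Masked instance referenced as
# a global from inside a UDF handed to DataFrame.apply, which is the scenario
# the docstring above describes.
#
#   import cudf
#   from cudf.core.udf.api import Masked
#
#   FILL = Masked(0, True)  # a valid masked scalar defined at module scope
#
#   def f(row):
#       x = row["a"]
#       return FILL if x is cudf.NA else x
#
#   cudf.DataFrame({"a": [1, None, 3]}).apply(f, axis=1)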
def pack_return(masked_or_scalar):
# Blank function to give us something for the typing and
# lowering to grab onto. Just a dummy function for us to
# call within kernels that will get replaced later by the
# lowered implementation
pass
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/groupby_lowering.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from functools import partial
from numba import types
from numba.core import cgutils
from numba.core.extending import lower_builtin
from numba.core.typing import signature as nb_signature
from numba.cuda.cudaimpl import lower as cuda_lower
from cudf.core.udf.groupby_typing import (
SUPPORTED_GROUPBY_NUMBA_TYPES,
Group,
GroupType,
call_cuda_functions,
group_size_type,
index_default_type,
)
def group_reduction_impl_basic(context, builder, sig, args, function):
"""
Instruction boilerplate used for calling a groupby reduction
__device__ function. Centers around a forward declaration of
this function and adds the pre/post processing instructions
necessary for calling it.
"""
# return type
retty = sig.return_type
# a variable logically corresponding to the calling `Group`
grp = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
# what specific (numba) GroupType
grp_type = sig.args[0]
group_dataty = grp_type.group_data_type
# obtain the correct forward declaration from registry
type_key = (sig.return_type, grp_type.group_scalar_type)
func = call_cuda_functions[function][type_key]
# insert the forward declaration and return its result
# pass it the data pointer and the group's size
return context.compile_internal(
builder,
func,
nb_signature(retty, group_dataty, grp_type.group_size_type),
(grp.group_data, grp.size),
)
def group_corr(context, builder, sig, args):
"""
Instruction boilerplate used for calling a groupby correlation
"""
lhs_grp = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
rhs_grp = cgutils.create_struct_proxy(sig.args[1])(
context, builder, value=args[1]
)
device_func = call_cuda_functions["corr"][
(
sig.return_type,
sig.args[0].group_scalar_type,
sig.args[1].group_scalar_type,
)
]
result = context.compile_internal(
builder,
device_func,
nb_signature(
types.float64,
types.CPointer(
sig.args[0].group_scalar_type
), # this group calls corr
types.CPointer(
sig.args[1].group_scalar_type
), # this group is passed
group_size_type,
),
(
lhs_grp.group_data,
rhs_grp.group_data,
lhs_grp.size,
),
)
return result
@lower_builtin(Group, types.Array, group_size_type, types.Array)
def group_constructor(context, builder, sig, args):
"""
Instruction boilerplate used for instantiating a Group
struct from a data pointer, an index pointer, and a size
"""
# a variable logically corresponding to the calling `Group`
grp = cgutils.create_struct_proxy(sig.return_type)(context, builder)
grp.group_data = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
).data
grp.index = cgutils.create_struct_proxy(sig.args[2])(
context, builder, value=args[2]
).data
grp.size = args[1]
return grp._getvalue()
def group_reduction_impl_idx_max_or_min(context, builder, sig, args, function):
"""
Instruction boilerplate used for calling a groupby reduction
__device__ function in the case where the function is either
`idxmax` or `idxmin`. See `group_reduction_impl_basic` for
details. This lowering differs from other reductions due to
the presence of the index. This results in the forward
declaration expecting an extra arg.
"""
retty = sig.return_type
grp = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
grp_type = sig.args[0]
if grp_type.index_type != index_default_type:
raise TypeError(
f"Only inputs with default index dtype {index_default_type} "
"are supported."
)
type_key = (index_default_type, grp_type.group_scalar_type)
func = call_cuda_functions[function][type_key]
return context.compile_internal(
builder,
func,
nb_signature(
retty,
grp_type.group_data_type,
grp_type.group_index_type,
grp_type.group_size_type,
),
(grp.group_data, grp.index, grp.size),
)
cuda_Group_max = partial(group_reduction_impl_basic, function="max")
cuda_Group_min = partial(group_reduction_impl_basic, function="min")
cuda_Group_sum = partial(group_reduction_impl_basic, function="sum")
cuda_Group_mean = partial(group_reduction_impl_basic, function="mean")
cuda_Group_std = partial(group_reduction_impl_basic, function="std")
cuda_Group_var = partial(group_reduction_impl_basic, function="var")
cuda_Group_idxmax = partial(
group_reduction_impl_idx_max_or_min, function="idxmax"
)
cuda_Group_idxmin = partial(
group_reduction_impl_idx_max_or_min, function="idxmin"
)
def cuda_Group_size(context, builder, sig, args):
grp = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
return grp.size
cuda_Group_count = cuda_Group_size
for ty in SUPPORTED_GROUPBY_NUMBA_TYPES:
cuda_lower("GroupType.max", GroupType(ty))(cuda_Group_max)
cuda_lower("GroupType.min", GroupType(ty))(cuda_Group_min)
cuda_lower("GroupType.sum", GroupType(ty))(cuda_Group_sum)
cuda_lower("GroupType.count", GroupType(ty))(cuda_Group_count)
cuda_lower("GroupType.size", GroupType(ty))(cuda_Group_size)
cuda_lower("GroupType.mean", GroupType(ty))(cuda_Group_mean)
cuda_lower("GroupType.std", GroupType(ty))(cuda_Group_std)
cuda_lower("GroupType.var", GroupType(ty))(cuda_Group_var)
cuda_lower("GroupType.idxmax", GroupType(ty, types.int64))(
cuda_Group_idxmax
)
cuda_lower("GroupType.idxmin", GroupType(ty, types.int64))(
cuda_Group_idxmin
)
cuda_lower("GroupType.corr", GroupType(ty), GroupType(ty))(group_corr)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/strings_lowering.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
import operator
from functools import partial
from numba import cuda, types
from numba.core import cgutils
from numba.core.datamodel import default_manager
from numba.core.typing import signature as nb_signature
from numba.cuda.cudaimpl import (
lower as cuda_lower,
registry as cuda_lowering_registry,
)
from cudf._lib.strings_udf import (
get_character_cases_table_ptr,
get_character_flags_table_ptr,
get_special_case_mapping_table_ptr,
)
from cudf.core.udf.masked_typing import MaskedType
from cudf.core.udf.strings_typing import size_type, string_view, udf_string
_STR_VIEW_PTR = types.CPointer(string_view)
_UDF_STRING_PTR = types.CPointer(udf_string)
# CUDA function declarations
# read-only (input is a string_view, output is a fixed-width type)
_string_view_len = cuda.declare_device("len", size_type(_STR_VIEW_PTR))
_concat_string_view = cuda.declare_device(
"concat", types.void(_UDF_STRING_PTR, _STR_VIEW_PTR, _STR_VIEW_PTR)
)
_string_view_replace = cuda.declare_device(
"replace",
types.void(_UDF_STRING_PTR, _STR_VIEW_PTR, _STR_VIEW_PTR, _STR_VIEW_PTR),
)
def _declare_binary_func(lhs, rhs, out, name):
# Declare a binary function
return cuda.declare_device(
name,
out(lhs, rhs),
)
def _declare_strip_func(name):
return cuda.declare_device(
name, size_type(_UDF_STRING_PTR, _STR_VIEW_PTR, _STR_VIEW_PTR)
)
# A binary function of the form f(string, string) -> bool
_declare_bool_str_str_func = partial(
_declare_binary_func, _STR_VIEW_PTR, _STR_VIEW_PTR, types.boolean
)
_declare_size_type_str_str_func = partial(
_declare_binary_func, _STR_VIEW_PTR, _STR_VIEW_PTR, size_type
)
_string_view_contains = _declare_bool_str_str_func("contains")
_string_view_eq = _declare_bool_str_str_func("eq")
_string_view_ne = _declare_bool_str_str_func("ne")
_string_view_ge = _declare_bool_str_str_func("ge")
_string_view_le = _declare_bool_str_str_func("le")
_string_view_gt = _declare_bool_str_str_func("gt")
_string_view_lt = _declare_bool_str_str_func("lt")
_string_view_startswith = _declare_bool_str_str_func("startswith")
_string_view_endswith = _declare_bool_str_str_func("endswith")
_string_view_find = _declare_size_type_str_str_func("find")
_string_view_rfind = _declare_size_type_str_str_func("rfind")
_string_view_contains = _declare_bool_str_str_func("contains")
_string_view_strip = _declare_strip_func("strip")
_string_view_lstrip = _declare_strip_func("lstrip")
_string_view_rstrip = _declare_strip_func("rstrip")
# A binary function of the form f(string, int) -> bool
_declare_bool_str_int_func = partial(
_declare_binary_func, _STR_VIEW_PTR, types.int64, types.boolean
)
def _declare_upper_or_lower(func):
return cuda.declare_device(
func,
types.void(
_UDF_STRING_PTR,
_STR_VIEW_PTR,
types.uintp,
types.uintp,
types.uintp,
),
)
_string_view_isdigit = _declare_bool_str_int_func("pyisdigit")
_string_view_isalnum = _declare_bool_str_int_func("pyisalnum")
_string_view_isalpha = _declare_bool_str_int_func("pyisalpha")
_string_view_isdecimal = _declare_bool_str_int_func("pyisdecimal")
_string_view_isnumeric = _declare_bool_str_int_func("pyisnumeric")
_string_view_isspace = _declare_bool_str_int_func("pyisspace")
_string_view_isupper = _declare_bool_str_int_func("pyisupper")
_string_view_islower = _declare_bool_str_int_func("pyislower")
_string_view_istitle = _declare_bool_str_int_func("pyistitle")
_string_view_upper = _declare_upper_or_lower("upper")
_string_view_lower = _declare_upper_or_lower("lower")
_string_view_count = cuda.declare_device(
"pycount",
size_type(_STR_VIEW_PTR, _STR_VIEW_PTR),
)
# casts
@cuda_lowering_registry.lower_cast(types.StringLiteral, string_view)
def cast_string_literal_to_string_view(context, builder, fromty, toty, val):
"""
Cast a literal to a string_view
"""
# create an empty string_view
sv = cgutils.create_struct_proxy(string_view)(context, builder)
# set the empty strview data pointer to point to the literal value
sv.data = context.insert_string_const_addrspace(
builder, fromty.literal_value
)
sv.length = context.get_constant(size_type, len(fromty.literal_value))
sv.bytes = context.get_constant(
size_type, len(fromty.literal_value.encode("UTF-8"))
)
return sv._getvalue()
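# Why this cast exists (illustrative): a UDF such as
#
#   lambda st: st.startswith("abc")
#
# presents "abc" to numba as a StringLiteral; the cast above converts it into
# a device-side string_view so the string shim functions declared in this
# module can consume it.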
@cuda_lowering_registry.lower_cast(string_view, udf_string)
def cast_string_view_to_udf_string(context, builder, fromty, toty, val):
sv_ptr = builder.alloca(default_manager[fromty].get_value_type())
udf_str_ptr = builder.alloca(default_manager[toty].get_value_type())
builder.store(val, sv_ptr)
_ = context.compile_internal(
builder,
call_create_udf_string_from_string_view,
nb_signature(types.void, _STR_VIEW_PTR, types.CPointer(udf_string)),
(sv_ptr, udf_str_ptr),
)
result = cgutils.create_struct_proxy(udf_string)(
context, builder, value=builder.load(udf_str_ptr)
)
return result._getvalue()
@cuda_lowering_registry.lower_cast(udf_string, string_view)
def cast_udf_string_to_string_view(context, builder, fromty, toty, val):
udf_str_ptr = builder.alloca(default_manager[fromty].get_value_type())
sv_ptr = builder.alloca(default_manager[toty].get_value_type())
builder.store(val, udf_str_ptr)
context.compile_internal(
builder,
call_create_string_view_from_udf_string,
nb_signature(types.void, _UDF_STRING_PTR, _STR_VIEW_PTR),
(udf_str_ptr, sv_ptr),
)
result = cgutils.create_struct_proxy(string_view)(
context, builder, value=builder.load(sv_ptr)
)
return result._getvalue()
# utilities
_create_udf_string_from_string_view = cuda.declare_device(
"udf_string_from_string_view",
types.void(_STR_VIEW_PTR, _UDF_STRING_PTR),
)
_create_string_view_from_udf_string = cuda.declare_device(
"string_view_from_udf_string",
types.void(_UDF_STRING_PTR, _STR_VIEW_PTR),
)
def call_create_udf_string_from_string_view(sv, udf_str):
_create_udf_string_from_string_view(sv, udf_str)
def call_create_string_view_from_udf_string(udf_str, sv):
_create_string_view_from_udf_string(udf_str, sv)
# String function implementations
def call_len_string_view(st):
return _string_view_len(st)
@cuda_lower(len, string_view)
def len_impl(context, builder, sig, args):
sv_ptr = builder.alloca(args[0].type)
builder.store(args[0], sv_ptr)
result = context.compile_internal(
builder,
call_len_string_view,
nb_signature(size_type, _STR_VIEW_PTR),
(sv_ptr,),
)
return result
def call_concat_string_view(result, lhs, rhs):
return _concat_string_view(result, lhs, rhs)
@cuda_lower(operator.add, string_view, string_view)
def concat_impl(context, builder, sig, args):
lhs_ptr = builder.alloca(args[0].type)
rhs_ptr = builder.alloca(args[1].type)
builder.store(args[0], lhs_ptr)
builder.store(args[1], rhs_ptr)
udf_str_ptr = builder.alloca(default_manager[udf_string].get_value_type())
_ = context.compile_internal(
builder,
call_concat_string_view,
types.void(_UDF_STRING_PTR, _STR_VIEW_PTR, _STR_VIEW_PTR),
(udf_str_ptr, lhs_ptr, rhs_ptr),
)
result = cgutils.create_struct_proxy(udf_string)(
context, builder, value=builder.load(udf_str_ptr)
)
return result._getvalue()
def call_string_view_replace(result, src, to_replace, replacement):
return _string_view_replace(result, src, to_replace, replacement)
@cuda_lower("StringView.replace", string_view, string_view, string_view)
@cuda_lower("UDFString.replace", string_view, string_view, string_view)
def replace_impl(context, builder, sig, args):
src_ptr = builder.alloca(args[0].type)
to_replace_ptr = builder.alloca(args[1].type)
replacement_ptr = builder.alloca(args[2].type)
builder.store(args[0], src_ptr)
    builder.store(args[1], to_replace_ptr)
builder.store(args[2], replacement_ptr)
udf_str_ptr = builder.alloca(default_manager[udf_string].get_value_type())
_ = context.compile_internal(
builder,
call_string_view_replace,
types.void(
_UDF_STRING_PTR, _STR_VIEW_PTR, _STR_VIEW_PTR, _STR_VIEW_PTR
),
(udf_str_ptr, src_ptr, to_replace_ptr, replacement_ptr),
)
result = cgutils.create_struct_proxy(udf_string)(
context, builder, value=builder.load(udf_str_ptr)
)
return result._getvalue()
def create_binary_string_func(binary_func, retty):
"""
Provide a wrapper around numba's low-level extension API which
produces the boilerplate needed to implement a binary function
of two strings.
"""
def deco(cuda_func):
@cuda_lower(binary_func, string_view, string_view)
def binary_func_impl(context, builder, sig, args):
lhs_ptr = builder.alloca(args[0].type)
rhs_ptr = builder.alloca(args[1].type)
builder.store(args[0], lhs_ptr)
builder.store(args[1], rhs_ptr)
# these conditional statements should compile out
if retty != udf_string:
# binary function of two strings yielding a fixed-width type
# example: str.startswith(other) -> bool
# shim functions can return the value through nb_retval
result = context.compile_internal(
builder,
cuda_func,
nb_signature(retty, _STR_VIEW_PTR, _STR_VIEW_PTR),
(lhs_ptr, rhs_ptr),
)
return result
else:
# binary function of two strings yielding a new string
# example: str.strip(other) -> str
# shim functions can not return a struct due to C linkage
# so we create a new udf_string and pass a pointer to it
# for the shim function to write the output to. The return
# value of compile_internal is therefore discarded (although
# this may change in the future if we need to return error
# codes, for instance).
udf_str_ptr = builder.alloca(
default_manager[udf_string].get_value_type()
)
_ = context.compile_internal(
builder,
cuda_func,
size_type(_UDF_STRING_PTR, _STR_VIEW_PTR, _STR_VIEW_PTR),
(udf_str_ptr, lhs_ptr, rhs_ptr),
)
result = cgutils.create_struct_proxy(udf_string)(
context, builder, value=builder.load(udf_str_ptr)
)
return result._getvalue()
# binary_func can be attribute-like: str.binary_func
# or operator-like: binary_func(str, other)
if isinstance(binary_func, str):
binary_func_impl = cuda_lower(
f"StringView.{binary_func}", string_view, string_view
)(binary_func_impl)
binary_func_impl = cuda_lower(
f"UDFString.{binary_func}", string_view, string_view
)(binary_func_impl)
else:
binary_func_impl = cuda_lower(
binary_func, string_view, string_view
)(binary_func_impl)
return binary_func_impl
return deco
@create_binary_string_func(operator.contains, types.boolean)
def contains_impl(st, substr):
return _string_view_contains(st, substr)
@create_binary_string_func(operator.eq, types.boolean)
def eq_impl(st, rhs):
return _string_view_eq(st, rhs)
@create_binary_string_func(operator.ne, types.boolean)
def ne_impl(st, rhs):
return _string_view_ne(st, rhs)
@create_binary_string_func(operator.ge, types.boolean)
def ge_impl(st, rhs):
return _string_view_ge(st, rhs)
@create_binary_string_func(operator.le, types.boolean)
def le_impl(st, rhs):
return _string_view_le(st, rhs)
@create_binary_string_func(operator.gt, types.boolean)
def gt_impl(st, rhs):
return _string_view_gt(st, rhs)
@create_binary_string_func(operator.lt, types.boolean)
def lt_impl(st, rhs):
return _string_view_lt(st, rhs)
@create_binary_string_func("strip", udf_string)
def strip_impl(result, to_strip, strip_char):
return _string_view_strip(result, to_strip, strip_char)
@create_binary_string_func("lstrip", udf_string)
def lstrip_impl(result, to_strip, strip_char):
return _string_view_lstrip(result, to_strip, strip_char)
@create_binary_string_func("rstrip", udf_string)
def rstrip_impl(result, to_strip, strip_char):
return _string_view_rstrip(result, to_strip, strip_char)
@create_binary_string_func("startswith", types.boolean)
def startswith_impl(sv, substr):
return _string_view_startswith(sv, substr)
@create_binary_string_func("endswith", types.boolean)
def endswith_impl(sv, substr):
return _string_view_endswith(sv, substr)
@create_binary_string_func("count", size_type)
def count_impl(st, substr):
return _string_view_count(st, substr)
@create_binary_string_func("find", size_type)
def find_impl(sv, substr):
return _string_view_find(sv, substr)
@create_binary_string_func("rfind", size_type)
def rfind_impl(sv, substr):
return _string_view_rfind(sv, substr)
def create_unary_identifier_func(id_func):
"""
Provide a wrapper around numba's low-level extension API which
produces the boilerplate needed to implement a unary function
of a string.
"""
def deco(cuda_func):
@cuda_lower(f"StringView.{id_func}", string_view)
@cuda_lower(f"UDFString.{id_func}", string_view)
def id_func_impl(context, builder, sig, args):
str_ptr = builder.alloca(args[0].type)
builder.store(args[0], str_ptr)
# Lookup table required for conversion functions
# must be resolved at runtime after context initialization,
# therefore cannot be a global variable
tbl_ptr = context.get_constant(
types.uintp, get_character_flags_table_ptr()
)
result = context.compile_internal(
builder,
cuda_func,
nb_signature(types.boolean, _STR_VIEW_PTR, types.uintp),
(str_ptr, tbl_ptr),
)
return result
return id_func_impl
return deco
def create_upper_or_lower(id_func):
"""
Provide a wrapper around numba's low-level extension API which
produces the boilerplate needed to implement either the upper
or lower attrs of a string view.
"""
def deco(cuda_func):
@cuda_lower(f"StringView.{id_func}", string_view)
@cuda_lower(f"UDFString.{id_func}", string_view)
def id_func_impl(context, builder, sig, args):
str_ptr = builder.alloca(args[0].type)
builder.store(args[0], str_ptr)
# Lookup table required for conversion functions
# must be resolved at runtime after context initialization,
# therefore cannot be a global variable
flags_tbl_ptr = context.get_constant(
types.uintp, get_character_flags_table_ptr()
)
cases_tbl_ptr = context.get_constant(
types.uintp, get_character_cases_table_ptr()
)
special_tbl_ptr = context.get_constant(
types.uintp, get_special_case_mapping_table_ptr()
)
udf_str_ptr = builder.alloca(
default_manager[udf_string].get_value_type()
)
_ = context.compile_internal(
builder,
cuda_func,
types.void(
_UDF_STRING_PTR,
_STR_VIEW_PTR,
types.uintp,
types.uintp,
types.uintp,
),
(
udf_str_ptr,
str_ptr,
flags_tbl_ptr,
cases_tbl_ptr,
special_tbl_ptr,
),
)
result = cgutils.create_struct_proxy(udf_string)(
context, builder, value=builder.load(udf_str_ptr)
)
return result._getvalue()
return id_func_impl
return deco
@create_upper_or_lower("upper")
def upper_impl(result, st, flags, cases, special):
return _string_view_upper(result, st, flags, cases, special)
@create_upper_or_lower("lower")
def lower_impl(result, st, flags, cases, special):
return _string_view_lower(result, st, flags, cases, special)
@create_unary_identifier_func("isdigit")
def isdigit_impl(st, tbl):
return _string_view_isdigit(st, tbl)
@create_unary_identifier_func("isalnum")
def isalnum_impl(st, tbl):
return _string_view_isalnum(st, tbl)
@create_unary_identifier_func("isalpha")
def isalpha_impl(st, tbl):
return _string_view_isalpha(st, tbl)
@create_unary_identifier_func("isnumeric")
def isnumeric_impl(st, tbl):
return _string_view_isnumeric(st, tbl)
@create_unary_identifier_func("isdecimal")
def isdecimal_impl(st, tbl):
return _string_view_isdecimal(st, tbl)
@create_unary_identifier_func("isspace")
def isspace_impl(st, tbl):
return _string_view_isspace(st, tbl)
@create_unary_identifier_func("isupper")
def isupper_impl(st, tbl):
return _string_view_isupper(st, tbl)
@create_unary_identifier_func("islower")
def islower_impl(st, tbl):
return _string_view_islower(st, tbl)
@create_unary_identifier_func("istitle")
def istitle_impl(st, tbl):
return _string_view_istitle(st, tbl)
@cuda_lower(len, MaskedType(string_view))
@cuda_lower(len, MaskedType(udf_string))
def masked_len_impl(context, builder, sig, args):
ret = cgutils.create_struct_proxy(sig.return_type)(context, builder)
masked_sv_ty = sig.args[0]
masked_sv = cgutils.create_struct_proxy(masked_sv_ty)(
context, builder, value=args[0]
)
result = len_impl(
context, builder, size_type(string_view), (masked_sv.value,)
)
ret.value = result
ret.valid = masked_sv.valid
return ret._getvalue()
def _masked_proxies(context, builder, maskedty, *args):
return tuple(
cgutils.create_struct_proxy(maskedty)(context, builder, value=arg)
for arg in args
)
@cuda_lower(
"MaskedType.replace",
MaskedType(string_view),
MaskedType(string_view),
MaskedType(string_view),
)
def masked_string_view_replace_impl(context, builder, sig, args):
ret = cgutils.create_struct_proxy(sig.return_type)(context, builder)
src_masked, to_replace_masked, replacement_masked = _masked_proxies(
context, builder, MaskedType(string_view), *args
)
result = replace_impl(
context,
builder,
nb_signature(udf_string, string_view, string_view, string_view),
(src_masked.value, to_replace_masked.value, replacement_masked.value),
)
ret.value = result
ret.valid = builder.and_(
builder.and_(src_masked.valid, to_replace_masked.valid),
replacement_masked.valid,
)
return ret._getvalue()
def create_masked_binary_string_func(op, cuda_func, retty):
"""
Provide a wrapper around numba's low-level extension API which
produces the boilerplate needed to implement a binary function
of two masked strings.
"""
def masked_binary_func_impl(context, builder, sig, args):
ret = cgutils.create_struct_proxy(sig.return_type)(context, builder)
lhs_masked = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
rhs_masked = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[1]
)
result = cuda_func(
context,
builder,
nb_signature(retty, string_view, string_view),
(lhs_masked.value, rhs_masked.value),
)
ret.value = result
ret.valid = builder.and_(lhs_masked.valid, rhs_masked.valid)
return ret._getvalue()
cuda_lower(op, MaskedType(string_view), MaskedType(string_view))(
masked_binary_func_impl
)
def create_masked_unary_identifier_func(op, cuda_func):
"""
Provide a wrapper around numba's low-level extension API which
produces the boilerplate needed to implement a unary function
of a masked string.
"""
def masked_unary_func_impl(context, builder, sig, args):
ret = cgutils.create_struct_proxy(sig.return_type)(context, builder)
masked_str = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
result = cuda_func(
context,
builder,
types.boolean(string_view, string_view),
(masked_str.value,),
)
ret.value = result
ret.valid = masked_str.valid
return ret._getvalue()
cuda_lower(op, MaskedType(string_view))(masked_unary_func_impl)
def create_masked_upper_or_lower(op, cuda_func):
def upper_or_lower_impl(context, builder, sig, args):
ret = cgutils.create_struct_proxy(sig.return_type)(context, builder)
masked_str = cgutils.create_struct_proxy(sig.args[0])(
context, builder, value=args[0]
)
result = cuda_func(
context,
builder,
udf_string(string_view),
(masked_str.value,),
)
ret.value = result
ret.valid = masked_str.valid
return ret._getvalue()
cuda_lower(op, MaskedType(string_view))(upper_or_lower_impl)
create_masked_binary_string_func("MaskedType.strip", strip_impl, udf_string)
create_masked_binary_string_func("MaskedType.lstrip", lstrip_impl, udf_string)
create_masked_binary_string_func("MaskedType.rstrip", rstrip_impl, udf_string)
create_masked_binary_string_func(
"MaskedType.startswith",
startswith_impl,
types.boolean,
)
create_masked_binary_string_func(
"MaskedType.endswith", endswith_impl, types.boolean
)
create_masked_binary_string_func("MaskedType.find", find_impl, size_type)
create_masked_binary_string_func("MaskedType.rfind", rfind_impl, size_type)
create_masked_binary_string_func("MaskedType.count", count_impl, size_type)
create_masked_binary_string_func(
operator.contains, contains_impl, types.boolean
)
create_masked_unary_identifier_func("MaskedType.isalnum", isalnum_impl)
create_masked_unary_identifier_func("MaskedType.isalpha", isalpha_impl)
create_masked_unary_identifier_func("MaskedType.isdigit", isdigit_impl)
create_masked_unary_identifier_func("MaskedType.isupper", isupper_impl)
create_masked_unary_identifier_func("MaskedType.islower", islower_impl)
create_masked_unary_identifier_func("MaskedType.isspace", isspace_impl)
create_masked_unary_identifier_func("MaskedType.isdecimal", isdecimal_impl)
create_masked_unary_identifier_func("MaskedType.istitle", istitle_impl)
create_masked_upper_or_lower("MaskedType.upper", upper_impl)
create_masked_upper_or_lower("MaskedType.lower", lower_impl)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/groupby_utils.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
import cupy as cp
import numpy as np
from numba import cuda, types
from numba.core.errors import TypingError
from numba.cuda.cudadrv.devices import get_context
from numba.np import numpy_support
import cudf.core.udf.utils
from cudf.core.udf.groupby_typing import (
SUPPORTED_GROUPBY_NUMPY_TYPES,
Group,
GroupType,
)
from cudf.core.udf.templates import (
group_initializer_template,
groupby_apply_kernel_template,
)
from cudf.core.udf.utils import (
Row,
_compile_or_get,
_get_extensionty_size,
_get_kernel,
_get_udf_return_type,
_supported_cols_from_frame,
_supported_dtypes_from_frame,
)
from cudf.utils._numba import _CUDFNumbaConfig
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
def _get_frame_groupby_type(dtype, index_dtype):
"""
Get the Numba type corresponding to a row of grouped data. Models the
column as a Record-like data structure containing GroupTypes. See
numba.np.numpy_support.from_struct_dtype for details.
Parameters
----------
    dtype : np.dtype
A numpy structured array dtype associating field names
to scalar dtypes
index_dtype : np.dtype
A numpy scalar dtype associated with the index of the
incoming grouped data
"""
# Create the numpy structured type corresponding to the numpy dtype.
fields = []
offset = 0
sizes = [val[0].itemsize for val in dtype.fields.values()]
for i, (name, info) in enumerate(dtype.fields.items()):
elemdtype = info[0]
title = info[2] if len(info) == 3 else None
ty = numpy_support.from_dtype(elemdtype)
indexty = numpy_support.from_dtype(index_dtype)
groupty = GroupType(ty, indexty)
infos = {
"type": groupty,
"offset": offset,
"title": title,
}
fields.append((name, infos))
offset += _get_extensionty_size(groupty)
# Align the next member of the struct to be a multiple of the
# memory access size, per PTX ISA 7.4/5.4.5
if i < len(sizes) - 1:
alignment = offset % 8
if alignment != 0:
offset += 8 - alignment
# Numba requires that structures are aligned for the CUDA target
_is_aligned_struct = True
return Row(fields, offset, _is_aligned_struct)
def _groupby_apply_kernel_string_from_template(frame, args):
"""
Function to write numba kernels for `Groupby.apply` as a string.
Workaround until numba supports functions that use `*args`
"""
# Create argument list for kernel
frame = _supported_cols_from_frame(
frame, supported_types=SUPPORTED_GROUPBY_NUMPY_TYPES
)
input_columns = ", ".join([f"input_col_{i}" for i in range(len(frame))])
extra_args = ", ".join([f"extra_arg_{i}" for i in range(len(args))])
# Generate the initializers for each device function argument
initializers = []
for i, colname in enumerate(frame.keys()):
initializers.append(
group_initializer_template.format(idx=i, name=colname)
)
return groupby_apply_kernel_template.format(
input_columns=input_columns,
extra_args=extra_args,
group_initializers="\n".join(initializers),
)
def _get_groupby_apply_kernel(frame, func, args):
np_field_types = np.dtype(
list(
_supported_dtypes_from_frame(
frame, supported_types=SUPPORTED_GROUPBY_NUMPY_TYPES
).items()
)
)
dataframe_group_type = _get_frame_groupby_type(
np_field_types, frame.index.dtype
)
return_type = _get_udf_return_type(dataframe_group_type, func, args)
# Dict of 'local' variables into which `_kernel` is defined
global_exec_context = {
"cuda": cuda,
"Group": Group,
"dataframe_group_type": dataframe_group_type,
"types": types,
}
kernel_string = _groupby_apply_kernel_string_from_template(frame, args)
kernel = _get_kernel(kernel_string, global_exec_context, None, func)
return kernel, return_type
@_cudf_nvtx_annotate
def jit_groupby_apply(offsets, grouped_values, function, *args):
"""
Main entrypoint for JIT Groupby.apply via Numba.
Parameters
----------
offsets : list
A list of integers denoting the indices of the group
boundaries in grouped_values
grouped_values : DataFrame
A DataFrame representing the source data
sorted by group keys
function : callable
The user-defined function to execute
"""
kernel, return_type = _compile_or_get(
grouped_values,
function,
args,
kernel_getter=_get_groupby_apply_kernel,
suffix="__GROUPBY_APPLY_UDF",
)
offsets = cp.asarray(offsets)
ngroups = len(offsets) - 1
output = cudf.core.column.column_empty(ngroups, dtype=return_type)
launch_args = [
offsets,
output,
grouped_values.index,
]
launch_args += list(
_supported_cols_from_frame(
grouped_values, supported_types=SUPPORTED_GROUPBY_NUMPY_TYPES
).values()
)
launch_args += list(args)
max_group_size = cp.diff(offsets).max()
if max_group_size >= 256:
blocklim = 256
else:
blocklim = ((max_group_size + 32 - 1) // 32) * 32
if kernel.specialized:
specialized = kernel
else:
specialized = kernel.specialize(*launch_args)
# Ask the driver to give a good config
ctx = get_context()
# Dispatcher is specialized, so there's only one definition - get
# it so we can get the cufunc from the code library
(kern_def,) = specialized.overloads.values()
grid, tpb = ctx.get_max_potential_block_size(
func=kern_def._codelibrary.get_cufunc(),
b2d_func=0,
memsize=0,
blocksizelimit=int(blocklim),
)
# Launch kernel
with _CUDFNumbaConfig():
specialized[ngroups, tpb](*launch_args)
return output
def _can_be_jitted(frame, func, args):
"""
Determine if this UDF is supported through the JIT engine
by attempting to compile just the function to PTX using the
target set of types
"""
if not hasattr(func, "__code__"):
# Numba requires bytecode to be present to proceed.
# See https://github.com/numba/numba/issues/4587
return False
np_field_types = np.dtype(
list(
_supported_dtypes_from_frame(
frame, supported_types=SUPPORTED_GROUPBY_NUMPY_TYPES
).items()
)
)
dataframe_group_type = _get_frame_groupby_type(
np_field_types, frame.index.dtype
)
try:
_get_udf_return_type(dataframe_group_type, func, args)
return True
except TypingError:
return False
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/__init__.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from . import (
groupby_lowering,
groupby_typing,
masked_lowering,
masked_typing,
strings_lowering,
strings_typing,
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/utils.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import os
from typing import Any, Callable, Dict
import cachetools
import cupy as cp
import llvmlite.binding as ll
import numpy as np
from cuda import cudart
from numba import cuda, typeof
from numba.core.datamodel import default_manager, models
from numba.core.errors import TypingError
from numba.core.extending import register_model
from numba.np import numpy_support
from numba.types import CPointer, Poison, Record, Tuple, boolean, int64, void
import rmm
from cudf._lib.strings_udf import (
column_from_udf_string_array,
column_to_string_view_array,
)
from cudf.api.types import is_scalar
from cudf.core.column.column import as_column
from cudf.core.dtypes import dtype
from cudf.core.udf.masked_typing import MaskedType
from cudf.core.udf.strings_typing import (
str_view_arg_handler,
string_view,
udf_string,
)
from cudf.utils import cudautils
from cudf.utils._numba import _get_ptx_file
from cudf.utils.dtypes import (
BOOL_TYPES,
DATETIME_TYPES,
NUMERIC_TYPES,
STRING_TYPES,
TIMEDELTA_TYPES,
)
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.utils import initfunc
# Maximum size of a string column is 2 GiB
_STRINGS_UDF_DEFAULT_HEAP_SIZE = os.environ.get(
"STRINGS_UDF_HEAP_SIZE", 2**31
)
_heap_size = 0
_cudf_str_dtype = dtype(str)
JIT_SUPPORTED_TYPES = (
NUMERIC_TYPES
| BOOL_TYPES
| DATETIME_TYPES
| TIMEDELTA_TYPES
| STRING_TYPES
)
libcudf_bitmask_type = numpy_support.from_dtype(np.dtype("int32"))
MASK_BITSIZE = np.dtype("int32").itemsize * 8
precompiled: cachetools.LRUCache = cachetools.LRUCache(maxsize=32)
launch_arg_getters: Dict[Any, Any] = {}
_PTX_FILE = _get_ptx_file(os.path.dirname(__file__), "shim_")
@_cudf_nvtx_annotate
def _get_udf_return_type(argty, func: Callable, args=()):
"""
Get the return type of a masked UDF for a given set of argument dtypes. It
is assumed that the function consumes a dictionary whose keys are strings
and whose values are of MaskedType. Initially assume that the UDF may be
written to utilize any field in the row - including those containing an
unsupported dtype. If an unsupported dtype is actually used in the function
the compilation should fail at `compile_udf`. If compilation succeeds, one
can infer that the function does not use any of the columns of unsupported
dtype - meaning we can drop them going forward and the UDF will still end
up getting fed rows containing all the fields it actually needs to use to
compute the answer for that row.
"""
# present a row containing all fields to the UDF and try and compile
compile_sig = (argty, *(typeof(arg) for arg in args))
# Get the return type. The PTX is also returned by compile_udf, but is not
# needed here.
ptx, output_type = cudautils.compile_udf(func, compile_sig)
if not isinstance(output_type, MaskedType):
numba_output_type = numpy_support.from_dtype(np.dtype(output_type))
else:
numba_output_type = output_type
result = (
numba_output_type
if not isinstance(numba_output_type, MaskedType)
else numba_output_type.value_type
)
result = result if result.is_internal else result.return_type
# _get_udf_return_type will throw a TypingError if the user tries to use
# a field in the row containing an unsupported dtype, except in the
# edge case where all the function does is return that element:
# def f(row):
# return row[<bad dtype key>]
# In this case numba is happy to return MaskedType(<bad dtype key>)
# because it relies on not finding overloaded operators for types to raise
# the exception, so we have to explicitly check for that case.
if isinstance(result, Poison):
raise TypingError(str(result))
return result
def _all_dtypes_from_frame(frame, supported_types=JIT_SUPPORTED_TYPES):
return {
colname: col.dtype
if str(col.dtype) in supported_types
else np.dtype("O")
for colname, col in frame._data.items()
}
def _supported_dtypes_from_frame(frame, supported_types=JIT_SUPPORTED_TYPES):
return {
colname: col.dtype
for colname, col in frame._data.items()
if str(col.dtype) in supported_types
}
def _supported_cols_from_frame(frame, supported_types=JIT_SUPPORTED_TYPES):
return {
colname: col
for colname, col in frame._data.items()
if str(col.dtype) in supported_types
}
def _masked_array_type_from_col(col):
"""
Return a type representing a tuple of arrays,
the first element an array of the numba type
corresponding to `dtype`, and the second an
array of bools representing a mask.
"""
if col.dtype == _cudf_str_dtype:
col_type = CPointer(string_view)
else:
nb_scalar_ty = numpy_support.from_dtype(col.dtype)
col_type = nb_scalar_ty[::1]
if col.mask is None:
return col_type
else:
return Tuple((col_type, libcudf_bitmask_type[::1]))
def _construct_signature(frame, return_type, args):
"""
Build the signature of numba types that will be used to
actually JIT the kernel itself later, accounting for types
and offsets. Skips columns with unsupported dtypes.
"""
if not return_type.is_internal:
return_type = CPointer(return_type)
else:
return_type = return_type[::1]
# Tuple of arrays, first the output data array, then the mask
return_type = Tuple((return_type, boolean[::1]))
offsets = []
sig = [return_type, int64]
for col in _supported_cols_from_frame(frame).values():
sig.append(_masked_array_type_from_col(col))
offsets.append(int64)
# return_type, size, data, masks, offsets, extra args
sig = void(*(sig + offsets + [typeof(arg) for arg in args]))
return sig
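# Worked example (no extra scalar args): for a frame with a single nullable
# int64 column, the signature assembled above is roughly
#
#   void(
#       Tuple((int64[::1], boolean[::1])),  # output data array + output mask
#       int64,                              # size
#       Tuple((int64[::1], int32[::1])),    # input data + bitmask words
#       int64,                              # column offset
#   )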
class Row(Record):
# Numba's Record type provides a convenient abstraction for representing a
# row, in that it provides a mapping from strings (column / field names) to
# types. However, it cannot be used directly since it assumes that all its
# fields can be converted to NumPy types by Numba's internal conversion
    # mechanism (`numba.np.numpy_support.as_dtype`). This is not the case for cuDF
# extension types that might be the column types (e.g. masked types, string
# types or group types).
#
# We use this type for type inference and type checking, but not in code
# generation. For this use case, it is sufficient to provide a dtype for a
# row that corresponds to any Python object.
@property
def dtype(self):
return np.dtype("object")
register_model(Row)(models.RecordModel)
@cuda.jit(device=True)
def _mask_get(mask, pos):
"""Return the validity of mask[pos] as a word."""
return (mask[pos // MASK_BITSIZE] >> (pos % MASK_BITSIZE)) & 1
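# Worked example of the lookup above: with MASK_BITSIZE == 32, position 35
# lives in mask word 35 // 32 == 1 at bit 35 % 32 == 3, so the validity read
# is (mask[1] >> 3) & 1.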
def _generate_cache_key(frame, func: Callable, args, suffix="__APPLY_UDF"):
"""Create a cache key that uniquely identifies a compilation.
A new compilation is needed any time any of the following things change:
- The UDF itself as defined in python by the user
- The types of the columns utilized by the UDF
- The existence of the input columns masks
"""
scalar_argtypes = tuple(typeof(arg) for arg in args)
return (
*cudautils.make_cache_key(
func, tuple(_all_dtypes_from_frame(frame).values())
),
*(col.mask is None for col in frame._data.values()),
*frame._data.keys(),
scalar_argtypes,
suffix,
)
@_cudf_nvtx_annotate
def _compile_or_get(
frame, func, args, kernel_getter=None, suffix="__APPLY_UDF"
):
"""
Return a compiled kernel in terms of MaskedTypes that launches a
kernel equivalent of `f` for the dtypes of `df`. The kernel uses
a thread for each row and calls `f` using that rows data / mask
to produce an output value and output validity for each row.
If the UDF has already been compiled for this requested dtypes,
a cached version will be returned instead of running compilation.
CUDA kernels are void and do not return values. Thus, we need to
preallocate a column of the correct dtype and pass it in as one of
the kernel arguments. This creates a chicken-and-egg problem where
we need the column type to compile the kernel, but normally we would
be getting that type FROM compiling the kernel (and letting numba
determine it as a return value). As a workaround, we compile the UDF
itself outside the final kernel to invoke a full typing pass, which
unfortunately is difficult to do without running full compilation.
We then obtain the return type from that separate compilation and
use it to allocate an output column of the right dtype.
"""
if not all(is_scalar(arg) for arg in args):
raise TypeError("only scalar valued args are supported by apply")
# check to see if we already compiled this function
cache_key = _generate_cache_key(frame, func, args, suffix=suffix)
if precompiled.get(cache_key) is not None:
kernel, masked_or_scalar = precompiled[cache_key]
return kernel, masked_or_scalar
# precompile the user udf to get the right return type.
# could be a MaskedType or a scalar type.
kernel, scalar_return_type = kernel_getter(frame, func, args)
np_return_type = (
numpy_support.as_dtype(scalar_return_type)
if scalar_return_type.is_internal
else scalar_return_type.np_dtype
)
precompiled[cache_key] = (kernel, np_return_type)
return kernel, np_return_type
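# Illustrative sketch (assumption, not part of this module): the compilation
# machinery above is normally reached through the public ``Series.apply`` /
# ``DataFrame.apply`` APIs. Re-applying the same UDF to columns of the same
# dtypes and nullability should hit the ``precompiled`` cache instead of
# recompiling.
def _example_apply_compilation_cache():
    import cudf

    def add_one(x):
        # nulls propagate through the masked addition
        return x + 1

    s = cudf.Series([1, 2, None, 4])
    first = s.apply(add_one)   # triggers a JIT compilation
    second = s.apply(add_one)  # same cache key -> reuses the compiled kernel
    return first, second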
def _get_kernel(kernel_string, globals_, sig, func):
"""Template kernel compilation helper function."""
f_ = cuda.jit(device=True)(func)
globals_["f_"] = f_
exec(kernel_string, globals_)
_kernel = globals_["_kernel"]
kernel = cuda.jit(
sig, link=[_PTX_FILE], extensions=[str_view_arg_handler]
)(_kernel)
return kernel
def _get_input_args_from_frame(fr):
args = []
offsets = []
for col in _supported_cols_from_frame(fr).values():
if col.dtype == _cudf_str_dtype:
data = column_to_string_view_array_init_heap(col)
else:
data = col.data
if col.mask is not None:
# argument is a tuple of data, mask
args.append((data, col.mask))
else:
# argument is just the data pointer
args.append(data)
offsets.append(col.offset)
return args + offsets
def _return_arr_from_dtype(dtype, size):
if dtype == _cudf_str_dtype:
return rmm.DeviceBuffer(size=size * _get_extensionty_size(udf_string))
return cp.empty(size, dtype=dtype)
def _post_process_output_col(col, retty):
if retty == _cudf_str_dtype:
return column_from_udf_string_array(col)
return as_column(col, retty)
# The only supported data layout in NVVM.
# See: https://docs.nvidia.com/cuda/nvvm-ir-spec/index.html?#data-layout
_nvvm_data_layout = (
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
"i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-"
"v64:64:64-v128:128:128-n16:32:64"
)
def _get_extensionty_size(ty):
"""
Return the size of an extension type in bytes
"""
target_data = ll.create_target_data(_nvvm_data_layout)
llty = default_manager[ty].get_value_type()
return llty.get_abi_size(target_data)
@initfunc
def set_malloc_heap_size(size=None):
"""
Heap size control for strings_udf, size in bytes.
"""
global _heap_size
if size is None:
size = _STRINGS_UDF_DEFAULT_HEAP_SIZE
if size != _heap_size:
(ret,) = cudart.cudaDeviceSetLimit(
cudart.cudaLimit.cudaLimitMallocHeapSize, size
)
if ret.value != 0:
raise RuntimeError("Unable to set cudaMalloc heap size")
_heap_size = size
def column_to_string_view_array_init_heap(col):
# lazily allocate heap only when a string needs to be returned
return column_to_string_view_array(col)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/strings_typing.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
import operator
import numpy as np
from numba import types
from numba.core.extending import models, register_model
from numba.core.typing import signature as nb_signature
from numba.core.typing.templates import AbstractTemplate, AttributeTemplate
from numba.cuda.cudadecl import registry as cuda_decl_registry
import rmm
# libcudf size_type
size_type = types.int32
# String object definitions
class UDFString(types.Type):
np_dtype = np.dtype("object")
def __init__(self):
super().__init__(name="udf_string")
@property
def return_type(self):
return self
class StringView(types.Type):
np_dtype = np.dtype("object")
def __init__(self):
super().__init__(name="string_view")
@property
def return_type(self):
return UDFString()
@register_model(StringView)
class stringview_model(models.StructModel):
# from string_view.hpp:
_members = (
# const char* _data{}
# Pointer to device memory contain char array for this string
("data", types.CPointer(types.char)),
# size_type _bytes{};
# Number of bytes in _data for this string
("bytes", size_type),
# mutable size_type _length{};
# Number of characters in this string (computed)
("length", size_type),
)
def __init__(self, dmm, fe_type):
super().__init__(dmm, fe_type, self._members)
@register_model(UDFString)
class udf_string_model(models.StructModel):
# from udf_string.hpp:
# private:
# char* m_data{};
# cudf::size_type m_bytes{};
# cudf::size_type m_size{};
_members = (
("m_data", types.CPointer(types.char)),
("m_bytes", size_type),
("m_size", size_type),
)
def __init__(self, dmm, fe_type):
super().__init__(dmm, fe_type, self._members)
any_string_ty = (StringView, UDFString, types.StringLiteral)
string_view = StringView()
udf_string = UDFString()
class StrViewArgHandler:
"""
As part of Numba's preprocessing step, incoming function arguments are
modified based on the associated type for that argument that was used
to JIT the kernel. However, it only knows how to handle built-in array
types natively. With string UDFs, the jitted type is string_view*,
which numba does not know how to handle.
This class converts string_view* to raw pointer arguments, which Numba
knows how to use.
See numba.cuda.compiler._prepare_args for details.
"""
def prepare_args(self, ty, val, **kwargs):
if isinstance(ty, types.CPointer) and isinstance(
ty.dtype, (StringView, UDFString)
):
return types.uint64, val.ptr if isinstance(
val, rmm._lib.device_buffer.DeviceBuffer
) else val.get_ptr(mode="read")
else:
return ty, val
str_view_arg_handler = StrViewArgHandler()
# String functions
@cuda_decl_registry.register_global(len)
class StringLength(AbstractTemplate):
"""
Provide the length of a cudf::string_view-like struct.
"""
def generic(self, args, kws):
if isinstance(args[0], any_string_ty) and len(args) == 1:
# length:
# string_view -> int32
# udf_string -> int32
# literal -> int32
return nb_signature(size_type, string_view)
def register_stringview_binaryop(op, retty):
"""
Helper function wrapping numba's low level extension API. Provides
the boilerplate needed to associate a signature with a function or
operator expecting a string.
"""
class StringViewBinaryOp(AbstractTemplate):
def generic(self, args, kws):
if isinstance(args[0], any_string_ty) and isinstance(
args[1], any_string_ty
):
return nb_signature(retty, string_view, string_view)
cuda_decl_registry.register_global(op)(StringViewBinaryOp)
def create_binary_attr(attrname, retty):
"""
Helper function wrapping numba's low level extension API. Provides
the boilerplate needed to register a binary function of two string
objects as an attribute of one, e.g. `string.func(other)`.
"""
class StringViewBinaryAttr(AbstractTemplate):
key = f"StringView.{attrname}"
def generic(self, args, kws):
return nb_signature(retty, string_view, recvr=self.this)
def attr(self, mod):
return types.BoundFunction(StringViewBinaryAttr, string_view)
return attr
def create_identifier_attr(attrname, retty):
"""
Helper function wrapping numba's low level extension API. Provides
the boilerplate needed to register a unary function of a string
object as an attribute, e.g. `string.func()`.
"""
class StringViewIdentifierAttr(AbstractTemplate):
key = f"StringView.{attrname}"
def generic(self, args, kws):
return nb_signature(retty, recvr=self.this)
def attr(self, mod):
return types.BoundFunction(StringViewIdentifierAttr, string_view)
return attr
class StringViewCount(AbstractTemplate):
key = "StringView.count"
def generic(self, args, kws):
return nb_signature(size_type, string_view, recvr=self.this)
class StringViewReplace(AbstractTemplate):
key = "StringView.replace"
def generic(self, args, kws):
return nb_signature(
udf_string, string_view, string_view, recvr=self.this
)
class StringViewAttrs(AttributeTemplate):
key = string_view
def resolve_count(self, mod):
return types.BoundFunction(StringViewCount, string_view)
def resolve_replace(self, mod):
return types.BoundFunction(StringViewReplace, string_view)
bool_binary_funcs = ["startswith", "endswith"]
int_binary_funcs = ["find", "rfind"]
id_unary_funcs = [
"isalpha",
"isalnum",
"isdecimal",
"isdigit",
"isupper",
"islower",
"isspace",
"isnumeric",
"istitle",
]
string_unary_funcs = ["upper", "lower"]
string_return_attrs = ["strip", "lstrip", "rstrip"]
for func in bool_binary_funcs:
setattr(
StringViewAttrs,
f"resolve_{func}",
create_binary_attr(func, types.boolean),
)
for func in string_return_attrs:
setattr(
StringViewAttrs,
f"resolve_{func}",
create_binary_attr(func, udf_string),
)
for func in int_binary_funcs:
setattr(
StringViewAttrs, f"resolve_{func}", create_binary_attr(func, size_type)
)
for func in id_unary_funcs:
setattr(
StringViewAttrs,
f"resolve_{func}",
create_identifier_attr(func, types.boolean),
)
for func in string_unary_funcs:
setattr(
StringViewAttrs,
f"resolve_{func}",
create_identifier_attr(func, udf_string),
)
@cuda_decl_registry.register_attr
class UDFStringAttrs(StringViewAttrs):
key = udf_string
cuda_decl_registry.register_attr(StringViewAttrs)
cuda_decl_registry.register_attr(UDFStringAttrs)
register_stringview_binaryop(operator.eq, types.boolean)
register_stringview_binaryop(operator.ne, types.boolean)
register_stringview_binaryop(operator.lt, types.boolean)
register_stringview_binaryop(operator.gt, types.boolean)
register_stringview_binaryop(operator.le, types.boolean)
register_stringview_binaryop(operator.ge, types.boolean)
# st in other
register_stringview_binaryop(operator.contains, types.boolean)
# st + other
register_stringview_binaryop(operator.add, udf_string)
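# Illustrative sketch (assumption): with the typings registered above, string
# columns can be used inside ``Series.apply`` UDFs. Predicates such as
# ``startswith`` resolve to booleans, while transforming methods such as
# ``upper`` produce ``udf_string`` results.
def _example_string_udfs():
    import cudf

    s = cudf.Series(["apple", "banana", "cherry"])

    def starts_with_a(st):
        # typed via the ``bool_binary_funcs`` registration above
        return st.startswith("a")

    def shout(st):
        # typed via the ``string_unary_funcs`` registration above
        return st.upper()

    return s.apply(starts_with_a), s.apply(shout)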
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/udf/masked_typing.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import operator
import numpy as np
from numba import types
from numba.core.extending import (
make_attribute_wrapper,
models,
register_model,
typeof_impl,
)
from numba.core.typing import signature as nb_signature
from numba.core.typing.templates import (
AbstractTemplate,
AttributeTemplate,
ConcreteTemplate,
)
from numba.core.typing.typeof import typeof
from numba.cuda.cudadecl import registry as cuda_decl_registry
from numba.np.numpy_support import from_dtype
from cudf.core.missing import NA
from cudf.core.udf import api
from cudf.core.udf._ops import (
arith_ops,
bitwise_ops,
comparison_ops,
unary_ops,
)
from cudf.core.udf.strings_typing import (
StringView,
UDFString,
bool_binary_funcs,
id_unary_funcs,
int_binary_funcs,
size_type,
string_return_attrs,
string_unary_funcs,
string_view,
udf_string,
)
from cudf.utils.dtypes import (
DATETIME_TYPES,
NUMERIC_TYPES,
STRING_TYPES,
TIMEDELTA_TYPES,
)
SUPPORTED_NUMPY_TYPES = (
NUMERIC_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | STRING_TYPES
)
supported_type_str = "\n".join(sorted(list(SUPPORTED_NUMPY_TYPES) + ["bool"]))
_units = ["ns", "ms", "us", "s"]
_datetime_cases = {types.NPDatetime(u) for u in _units}
_timedelta_cases = {types.NPTimedelta(u) for u in _units}
_supported_masked_types = (
types.integer_domain
| types.real_domain
| _datetime_cases
| _timedelta_cases
| {types.boolean}
| {string_view, udf_string}
)
SUPPORTED_NUMBA_TYPES = (
types.Number,
types.Boolean,
types.NPDatetime,
types.NPTimedelta,
StringView,
UDFString,
)
def _format_error_string(err):
"""
Wrap an error message in newlines and color it red.
"""
return "\033[91m" + "\n" + err + "\n" + "\033[0m"
def _type_to_masked_type(t):
if isinstance(t, SUPPORTED_NUMBA_TYPES):
return t
else:
# Unsupported Dtype. Numba tends to print out the type info
# for whatever operands and operation failed to type and then
# output its own error message. Putting the message in the repr
# then is one way of getting the true cause to the user
err = _format_error_string(
"Unsupported MaskedType. This is usually caused by "
"attempting to use a column of unsupported dtype in a UDF. "
f"Supported dtypes are:\n{supported_type_str}"
)
return types.Poison(err)
# Masked scalars of all types
class MaskedType(types.Type):
"""
A Numba type consisting of a value of some primitive type
and a validity boolean, over which we can define math ops
"""
def __init__(self, value):
# MaskedType in Numba shall be parameterized
# with a value type
self.value_type = _type_to_masked_type(value)
super().__init__(name=f"Masked({self.value_type})")
def __hash__(self):
"""
Needed so that numba caches type instances with different
`value_type` separately.
"""
return hash(repr(self))
def unify(self, context, other):
"""
Often within a UDF an instance arises where a variable could
be a `MaskedType`, an `NAType`, or a literal based off
the data at runtime, for example the variable `ret` here:
def f(x):
if x == 1:
ret = x
elif x > 2:
ret = 1
else:
ret = cudf.NA
return ret
When numba analyzes this function it will eventually figure
out that the variable `ret` could be any of the three types
from above. This scenario will only work if numba knows how
to find some kind of common type between the possibilities,
and this function implements that - the goal is to return a
common type when comparing `self` to other.
"""
# If we have Masked and NA, the output should be a
# MaskedType with the original type as its value_type
if isinstance(other, NAType):
return self
# two MaskedType unify to a new MaskedType whose value_type
# is the result of unifying `self` and `other` `value_type`
elif isinstance(other, MaskedType):
return MaskedType(
context.unify_pairs(self.value_type, other.value_type)
)
# if we have MaskedType and something that results in a
# scalar, unify between the MaskedType's value_type
# and that other thing
unified = context.unify_pairs(self.value_type, other)
if unified is None:
# The value types don't unify, so there is no unified masked type
return None
return MaskedType(unified)
def __eq__(self, other):
# Equality is required for determining whether a cast is required
# between two different types.
if not isinstance(other, MaskedType):
# Require a cast when the other type is not masked
return False
# Require a cast for another masked with a different value type
return self.value_type == other.value_type
# For typing a Masked constant value defined outside a kernel (e.g. captured in
# a closure).
@typeof_impl.register(api.Masked)
def typeof_masked(val, c):
return MaskedType(typeof(val.value))
# Implemented typing for Masked(value, valid) - the construction of a Masked
# type in a kernel.
@cuda_decl_registry.register
class MaskedConstructor(ConcreteTemplate):
key = api.Masked
cases = [
nb_signature(MaskedType(t), t, types.boolean)
for t in _supported_masked_types
]
# Typing for `api.Masked`
@cuda_decl_registry.register_attr
class ClassesTemplate(AttributeTemplate):
key = types.Module(api)
def resolve_Masked(self, mod):
return types.Function(MaskedConstructor)
# Registration of the global is also needed for Numba to type api.Masked
cuda_decl_registry.register_global(api, types.Module(api))
# For typing bare Masked (as in `from .api import Masked`)
cuda_decl_registry.register_global(
api.Masked, types.Function(MaskedConstructor)
)
# Provide access to `m.value` and `m.valid` in a kernel for a Masked `m`.
make_attribute_wrapper(MaskedType, "value", "value")
make_attribute_wrapper(MaskedType, "valid", "valid")
# Tell numba how `MaskedType` is constructed on the backend in terms
# of primitive things that exist at the LLVM level
@register_model(MaskedType)
class MaskedModel(models.StructModel):
def __init__(self, dmm, fe_type):
# This struct has two members, a value and a validity
# let the type of the `value` field be the same as the
# `value_type` and let `valid` be a boolean
members = [("value", fe_type.value_type), ("valid", types.bool_)]
models.StructModel.__init__(self, dmm, fe_type, members)
class NAType(types.Type):
"""
A type for handling ops against nulls
Exists so we can:
1. Teach numba that all occurrences of `cudf.NA` are
to be read as instances of this type instead
2. Define ops like `if x is cudf.NA` where `x` is of
type `Masked` to mean `if x.valid is False`
"""
def __init__(self):
super().__init__(name="NA")
def unify(self, context, other):
"""
Masked <-> NA is deferred to MaskedType.unify()
Literal <-> NA -> Masked
"""
if isinstance(other, MaskedType):
# bounce to MaskedType.unify
return None
elif isinstance(other, NAType):
# unify {NA, NA} -> NA
return self
else:
return MaskedType(other)
na_type = NAType()
@typeof_impl.register(type(NA))
def typeof_na(val, c):
"""
Tie instances of _NAType (cudf.NA) to our NAType.
Effectively make it so numba sees `cudf.NA` as an
instance of this NAType -> handle it accordingly.
"""
return na_type
register_model(NAType)(models.OpaqueModel)
# Ultimately, we want numba to produce PTX code that specifies how to implement
# an operation on two singular `Masked` structs together, which is defined
# as producing a new `Masked` with the right validity and if valid,
# the correct value. This happens in two phases:
# 1. Specify that `Masked` <op> `Masked` exists and what it should return
# 2. Implement how to actually do (1) at the LLVM level
# The following code accomplishes (1) - it is really just a way of specifying
# that the <op> has a CUDA overload that accepts two `Masked` that
# are parameterized with `value_type` and what flavor of `Masked` to return.
class MaskedScalarArithOp(AbstractTemplate):
def generic(self, args, kws):
"""
Typing for `Masked` <op> `Masked`
Numba expects a valid numba type to be returned if typing is successful;
otherwise `None` signifies the error state (this pattern is commonly used
in Numba)
"""
if isinstance(args[0], MaskedType) and isinstance(args[1], MaskedType):
# In the case of op(Masked, Masked), the return type is a Masked
# such that Masked.value is the primitive type that would have
# been resolved if we were just operating on the
# `value_type`s.
return_type = self.context.resolve_function_type(
self.key, (args[0].value_type, args[1].value_type), kws
).return_type
return nb_signature(MaskedType(return_type), args[0], args[1])
class MaskedScalarUnaryOp(AbstractTemplate):
def generic(self, args, kws):
if len(args) == 1 and isinstance(args[0], MaskedType):
return_type = self.context.resolve_function_type(
self.key, (args[0].value_type,), kws
).return_type
return nb_signature(MaskedType(return_type), args[0])
class MaskedScalarNullOp(AbstractTemplate):
def generic(self, args, kws):
"""
Typing for `Masked` + `NA`
Handles situations like `x + cudf.NA`
"""
if isinstance(args[0], MaskedType) and isinstance(args[1], NAType):
# In the case of op(Masked, NA), the result has the same
# dtype as the original regardless of what it is
return nb_signature(
args[0],
args[0],
na_type,
)
elif isinstance(args[0], NAType) and isinstance(args[1], MaskedType):
return nb_signature(args[1], na_type, args[1])
class MaskedScalarScalarOp(AbstractTemplate):
def generic(self, args, kws):
"""
Typing for `Masked` <op> a scalar (and vice-versa).
Handles situations like `x + 1`
"""
# In the case of op(Masked, scalar), we resolve the type between
# the Masked value_type and the scalar's type directly
to_resolve_types = None
if isinstance(args[0], MaskedType) and isinstance(
args[1], SUPPORTED_NUMBA_TYPES
):
to_resolve_types = (args[0].value_type, args[1])
elif isinstance(args[0], SUPPORTED_NUMBA_TYPES) and isinstance(
args[1], MaskedType
):
to_resolve_types = (args[1].value_type, args[0])
else:
# fail typing
return None
return_type = self.context.resolve_function_type(
self.key, to_resolve_types, kws
).return_type
return nb_signature(
MaskedType(return_type),
args[0],
args[1],
)
@cuda_decl_registry.register_global(operator.is_)
class MaskedScalarIsNull(AbstractTemplate):
"""
Typing for `Masked is cudf.NA`
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType) and isinstance(args[1], NAType):
return nb_signature(types.boolean, args[0], na_type)
elif isinstance(args[1], MaskedType) and isinstance(args[0], NAType):
return nb_signature(types.boolean, na_type, args[1])
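# Illustrative sketch (assumption): the typing above is what allows a user UDF
# to branch on nullness with ``is cudf.NA``, e.g. to supply a fill value.
def _example_is_na_udf():
    import cudf

    def fill_missing_with_zero(x):
        # ``x is cudf.NA`` resolves through MaskedScalarIsNull
        if x is cudf.NA:
            return 0
        return x

    return cudf.Series([1, None, 3]).apply(fill_missing_with_zero)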
@cuda_decl_registry.register_global(operator.truth)
class MaskedScalarTruth(AbstractTemplate):
"""
Typing for `if Masked`
Used for `if x > y`
The truthiness of a MaskedType shall be the truthiness
of the `value` stored therein
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType):
return nb_signature(types.boolean, MaskedType(types.boolean))
@cuda_decl_registry.register_global(float)
class MaskedScalarFloatCast(AbstractTemplate):
"""
Typing for float(Masked)
returns the result of calling "float" on the input
TODO: retains the validity of the input rather than
raising as in float(pd.NA)
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType):
# following numpy convention np.dtype(float) -> dtype('float64')
return nb_signature(MaskedType(types.float64), args[0])
@cuda_decl_registry.register_global(int)
class MaskedScalarIntCast(AbstractTemplate):
"""
Typing for int(Masked)
returns the result of calling "int" on the input
TODO: retains the validity of the input rather than
raising as in int(pd.NA)
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType):
# following numpy convention np.dtype(int) -> dtype('int64')
return nb_signature(MaskedType(types.int64), args[0])
@cuda_decl_registry.register_global(abs)
class MaskedScalarAbsoluteValue(AbstractTemplate):
"""
Typing for the builtin function abs. Returns the same
type as input except for boolean values which are converted
to integer.
This follows the expected result from the builtin abs function
which differs from numpy - np.abs returns a bool whereas abs
itself performs the cast.
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType):
if isinstance(args[0].value_type, (StringView, UDFString)):
# reject string types
return
else:
return_type = self.context.resolve_function_type(
self.key, (args[0].value_type,), kws
).return_type
if return_type in types.signed_domain:
# promote to unsigned to avoid overflow
return_type = from_dtype(np.dtype("u" + return_type.name))
return nb_signature(MaskedType(return_type), args[0])
@cuda_decl_registry.register_global(api.pack_return)
class UnpackReturnToMasked(AbstractTemplate):
"""
Turn a returned MaskedType into its value and validity
or turn a scalar into the tuple (scalar, True).
"""
def generic(self, args, kws):
if isinstance(args[0], MaskedType):
# MaskedType(dtype, valid) -> MaskedType(dtype, valid)
return nb_signature(args[0], args[0])
elif isinstance(args[0], SUPPORTED_NUMBA_TYPES):
# scalar_type -> MaskedType(scalar_type, True)
return_type = MaskedType(args[0])
return nb_signature(return_type, args[0])
for binary_op in arith_ops + bitwise_ops + comparison_ops:
# Every op shares the same typing class
cuda_decl_registry.register_global(binary_op)(MaskedScalarArithOp)
cuda_decl_registry.register_global(binary_op)(MaskedScalarNullOp)
cuda_decl_registry.register_global(binary_op)(MaskedScalarScalarOp)
for unary_op in unary_ops:
cuda_decl_registry.register_global(unary_op)(MaskedScalarUnaryOp)
# Strings functions and utilities
def _is_valid_string_arg(ty):
return (
isinstance(ty, MaskedType)
and isinstance(ty.value_type, (StringView, UDFString))
) or isinstance(ty, types.StringLiteral)
def register_masked_string_function(func):
"""
Helper function wrapping numba's low level extension API. Provides
the boilerplate needed to associate a signature with a function or
operator to be overloaded.
"""
def deco(generic):
class MaskedStringFunction(AbstractTemplate):
pass
MaskedStringFunction.generic = generic
cuda_decl_registry.register_global(func)(MaskedStringFunction)
return deco
@register_masked_string_function(len)
def len_typing(self, args, kws):
if isinstance(args[0], MaskedType) and isinstance(
args[0].value_type, (StringView, UDFString)
):
return nb_signature(MaskedType(size_type), MaskedType(string_view))
elif isinstance(args[0], types.StringLiteral) and len(args) == 1:
return nb_signature(size_type, args[0])
@register_masked_string_function(operator.add)
def concat_typing(self, args, kws):
if _is_valid_string_arg(args[0]) and _is_valid_string_arg(args[1]):
return nb_signature(
MaskedType(udf_string),
MaskedType(string_view),
MaskedType(string_view),
)
@register_masked_string_function(operator.contains)
def contains_typing(self, args, kws):
if _is_valid_string_arg(args[0]) and _is_valid_string_arg(args[1]):
return nb_signature(
MaskedType(types.boolean),
MaskedType(string_view),
MaskedType(string_view),
)
class MaskedStringViewCmpOp(AbstractTemplate):
"""
Return the boolean result of `cmpop` between two strings.
Since the typing is the same for every comparison operator,
we can reuse this class for all of them.
"""
def generic(self, args, kws):
if _is_valid_string_arg(args[0]) and _is_valid_string_arg(args[1]):
return nb_signature(
MaskedType(types.boolean),
MaskedType(string_view),
MaskedType(string_view),
)
for op in comparison_ops:
cuda_decl_registry.register_global(op)(MaskedStringViewCmpOp)
def create_masked_binary_attr(attrname, retty):
"""
Helper function wrapping numba's low level extension API. Provides
the boilerplate needed to register a binary function of two masked
string objects as an attribute of one, e.g. `string.func(other)`.
"""
class MaskedStringViewBinaryAttr(AbstractTemplate):
key = attrname
def generic(self, args, kws):
return nb_signature(
MaskedType(retty), MaskedType(string_view), recvr=self.this
)
def attr(self, mod):
return types.BoundFunction(
MaskedStringViewBinaryAttr,
MaskedType(string_view),
)
return attr
def create_masked_unary_attr(attrname, retty):
"""
Helper function wrapping numba's low level extension API. Provides
the boilerplate needed to register a unary function of a masked
string object as an attribute, e.g. `string.func()`.
"""
class MaskedStringViewIdentifierAttr(AbstractTemplate):
key = attrname
def generic(self, args, kws):
return nb_signature(MaskedType(retty), recvr=self.this)
def attr(self, mod):
return types.BoundFunction(
MaskedStringViewIdentifierAttr,
MaskedType(string_view),
)
return attr
class MaskedStringViewCount(AbstractTemplate):
key = "MaskedType.count"
def generic(self, args, kws):
return nb_signature(
MaskedType(size_type), MaskedType(string_view), recvr=self.this
)
class MaskedStringViewReplace(AbstractTemplate):
key = "MaskedType.replace"
def generic(self, args, kws):
return nb_signature(
MaskedType(udf_string),
MaskedType(string_view),
MaskedType(string_view),
recvr=self.this,
)
class MaskedStringViewAttrs(AttributeTemplate):
key = MaskedType(string_view)
def resolve_replace(self, mod):
return types.BoundFunction(
MaskedStringViewReplace, MaskedType(string_view)
)
def resolve_count(self, mod):
return types.BoundFunction(
MaskedStringViewCount, MaskedType(string_view)
)
def resolve_value(self, mod):
return string_view
def resolve_valid(self, mod):
return types.boolean
# Build attributes for `MaskedType(string_view)`
for func in bool_binary_funcs:
setattr(
MaskedStringViewAttrs,
f"resolve_{func}",
create_masked_binary_attr(f"MaskedType.{func}", types.boolean),
)
for func in int_binary_funcs:
setattr(
MaskedStringViewAttrs,
f"resolve_{func}",
create_masked_binary_attr(f"MaskedType.{func}", size_type),
)
for func in string_return_attrs:
setattr(
MaskedStringViewAttrs,
f"resolve_{func}",
create_masked_binary_attr(f"MaskedType.{func}", udf_string),
)
for func in id_unary_funcs:
setattr(
MaskedStringViewAttrs,
f"resolve_{func}",
create_masked_unary_attr(f"MaskedType.{func}", types.boolean),
)
for func in string_unary_funcs:
setattr(
MaskedStringViewAttrs,
f"resolve_{func}",
create_masked_unary_attr(f"MaskedType.{func}", udf_string),
)
class MaskedUDFStringAttrs(MaskedStringViewAttrs):
key = MaskedType(udf_string)
def resolve_value(self, mod):
return udf_string
cuda_decl_registry.register_attr(MaskedStringViewAttrs)
cuda_decl_registry.register_attr(MaskedUDFStringAttrs)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/window/rolling.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION
import itertools
import numba
import pandas as pd
from pandas.api.indexers import BaseIndexer
import cudf
from cudf import _lib as libcudf
from cudf.api.types import is_integer, is_number
from cudf.core import column
from cudf.core._compat import PANDAS_GE_150
from cudf.core.buffer import acquire_spill_lock
from cudf.core.column.column import as_column
from cudf.core.mixins import Reducible
from cudf.utils import cudautils
from cudf.utils.utils import GetAttrGetItemMixin
class Rolling(GetAttrGetItemMixin, Reducible):
"""
Rolling window calculations.
Parameters
----------
window : int, offset or a BaseIndexer subclass
Size of the window, i.e., the number of observations used
to calculate the statistic.
For datetime indexes, an offset can be provided instead
of an int. The offset must be convertible to a timedelta.
As opposed to a fixed window size, each window will be
sized to accommodate observations within the time period
specified by the offset.
If a BaseIndexer subclass is passed, calculates the window
boundaries based on the defined ``get_window_bounds`` method.
min_periods : int, optional
The minimum number of observations in the window that are
required to be non-null, so that the result is non-null.
If not provided or ``None``, ``min_periods`` is equal to
the window size.
center : bool, optional
If ``True``, the result is set at the center of the window.
If ``False`` (default), the result is set at the right edge
of the window.
Returns
-------
``Rolling`` object.
Examples
--------
>>> import cudf
>>> a = cudf.Series([1, 2, 3, None, 4])
Rolling sum with window size 2.
>>> print(a.rolling(2).sum())
0
1 3
2 5
3
4
dtype: int64
Rolling sum with window size 2 and min_periods 1.
>>> print(a.rolling(2, min_periods=1).sum())
0 1
1 3
2 5
3 3
4 4
dtype: int64
Rolling count with window size 3.
>>> print(a.rolling(3).count())
0 1
1 2
2 3
3 2
4 2
dtype: int64
Rolling count with window size 3, but with the result set at the
center of the window.
>>> print(a.rolling(3, center=True).count())
0 2
1 3
2 2
3 2
4    1
dtype: int64
Rolling max with variable window size specified by an offset;
only valid for datetime index.
>>> a = cudf.Series(
... [1, 9, 5, 4, np.nan, 1],
... index=[
... pd.Timestamp('20190101 09:00:00'),
... pd.Timestamp('20190101 09:00:01'),
... pd.Timestamp('20190101 09:00:02'),
... pd.Timestamp('20190101 09:00:04'),
... pd.Timestamp('20190101 09:00:07'),
... pd.Timestamp('20190101 09:00:08')
... ]
... )
>>> print(a.rolling('2s').max())
2019-01-01T09:00:00.000 1
2019-01-01T09:00:01.000 9
2019-01-01T09:00:02.000 9
2019-01-01T09:00:04.000 4
2019-01-01T09:00:07.000
2019-01-01T09:00:08.000 1
dtype: int64
Apply custom function on the window with the *apply* method
>>> import numpy as np
>>> import math
>>> b = cudf.Series([16, 25, 36, 49, 64, 81], dtype=np.float64)
>>> def some_func(A):
... b = 0
... for a in A:
... b = b + math.sqrt(a)
... return b
...
>>> print(b.rolling(3, min_periods=1).apply(some_func))
0 4.0
1 9.0
2 15.0
3 18.0
4 21.0
5 24.0
dtype: float64
And this also works for window rolling set by an offset
>>> import pandas as pd
>>> c = cudf.Series(
... [16, 25, 36, 49, 64, 81],
... index=[
... pd.Timestamp('20190101 09:00:00'),
... pd.Timestamp('20190101 09:00:01'),
... pd.Timestamp('20190101 09:00:02'),
... pd.Timestamp('20190101 09:00:04'),
... pd.Timestamp('20190101 09:00:07'),
... pd.Timestamp('20190101 09:00:08')
... ],
... dtype=np.float64
... )
>>> print(c.rolling('2s').apply(some_func))
2019-01-01T09:00:00.000 4.0
2019-01-01T09:00:01.000 9.0
2019-01-01T09:00:02.000 11.0
2019-01-01T09:00:04.000 7.0
2019-01-01T09:00:07.000 8.0
2019-01-01T09:00:08.000 17.0
dtype: float64
"""
_PROTECTED_KEYS = frozenset(("obj",))
_time_window = False
_VALID_REDUCTIONS = {
"sum",
"min",
"max",
"mean",
"var",
"std",
}
def __init__(
self,
obj,
window,
min_periods=None,
center=False,
axis=0,
win_type=None,
):
self.obj = obj
self.window = window
self.min_periods = min_periods
self.center = center
self._normalize()
self.agg_params = {}
if axis != 0:
raise NotImplementedError("axis != 0 is not supported yet.")
self.axis = axis
if win_type is not None:
if win_type != "boxcar":
raise NotImplementedError(
"Only the default win_type 'boxcar' is currently supported"
)
self.win_type = win_type
def __getitem__(self, arg):
if isinstance(arg, tuple):
arg = list(arg)
return self.obj[arg].rolling(
window=self.window,
min_periods=self.min_periods,
center=self.center,
)
def _apply_agg_column(self, source_column, agg_name):
min_periods = self.min_periods or 1
if isinstance(self.window, int):
preceding_window = None
following_window = None
window = self.window
elif isinstance(self.window, BaseIndexer):
if PANDAS_GE_150:
start, end = self.window.get_window_bounds(
num_values=len(self.obj),
min_periods=self.min_periods,
center=self.center,
closed=None,
step=None,
)
else:
start, end = self.window.get_window_bounds(
num_values=len(self.obj),
min_periods=self.min_periods,
center=self.center,
closed=None,
)
start = as_column(start, dtype="int32")
end = as_column(end, dtype="int32")
idx = cudf.core.column.arange(len(start))
preceding_window = (idx - start + cudf.Scalar(1, "int32")).astype(
"int32"
)
following_window = (end - idx - cudf.Scalar(1, "int32")).astype(
"int32"
)
window = None
else:
preceding_window = as_column(self.window)
following_window = column.full(
self.window.size, 0, dtype=self.window.dtype
)
window = None
return libcudf.rolling.rolling(
source_column=source_column,
pre_column_window=preceding_window,
fwd_column_window=following_window,
window=window,
min_periods=min_periods,
center=self.center,
op=agg_name,
agg_params=self.agg_params,
)
def _apply_agg_dataframe(self, df, agg_name):
return cudf.DataFrame._from_data(
{
col_name: self._apply_agg_column(col, agg_name)
for col_name, col in df._data.items()
},
index=df.index,
)
def _apply_agg(self, agg_name):
if isinstance(self.obj, cudf.Series):
return cudf.Series._from_data(
{
self.obj.name: self._apply_agg_column(
self.obj._column, agg_name
)
},
index=self.obj.index,
)
else:
return self._apply_agg_dataframe(self.obj, agg_name)
def _reduce(
self,
op: str,
*args,
**kwargs,
):
"""Calculate the rolling {op}.
Returns
-------
Series or DataFrame
Return type is the same as the original object.
"""
return self._apply_agg(op)
def var(self, ddof=1):
"""Calculate the rolling variance.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of
elements.
Returns
-------
Series or DataFrame
Return type is the same as the original object.
"""
self.agg_params["ddof"] = ddof
return self._apply_agg("var")
def std(self, ddof=1):
"""Calculate the rolling standard deviation.
Parameters
----------
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of
elements.
Returns
-------
Series or DataFrame
Return type is the same as the original object.
"""
self.agg_params["ddof"] = ddof
return self._apply_agg("std")
def count(self):
"""Calculate the rolling count of non NaN observations.
Returns
-------
Series or DataFrame
Return type is the same as the original object.
"""
return self._apply_agg("count")
def apply(self, func, *args, **kwargs):
"""
Calculate the rolling custom aggregation function.
Parameters
----------
func : function
A user defined function that takes an 1D array as input
args : tuple
unsupported.
kwargs
unsupported
See Also
--------
cudf.Series.apply: Apply an elementwise function to
transform the values in the Column.
Notes
-----
The supported Python features are listed in
https://numba.readthedocs.io/en/stable/cuda/cudapysupported.html
with these exceptions:
* Math functions in `cmath` are not supported since `libcudf` does not
have complex number support, and the output of `cmath` functions is most
likely a complex number.
* These five functions in `math` are not supported since numba
generates multiple PTX functions from them:
* math.sin()
* math.cos()
* math.tan()
* math.gamma()
* math.lgamma()
* Series with string dtypes are not supported.
* Global variables need to be re-defined explicitly inside
the udf, as numba considers them to be compile-time constants
and there is no known way to obtain the value of the global variable.
Examples
--------
>>> import cudf
>>> def count_if_gt_3(window):
... count = 0
... for i in window:
... if i > 3:
... count += 1
... return count
...
>>> s = cudf.Series([0, 1.1, 5.8, 3.1, 6.2, 2.0, 1.5])
>>> s.rolling(3, min_periods=1).apply(count_if_gt_3)
0 0
1 0
2 1
3 2
4 3
5 2
6 1
dtype: int64
"""
has_nulls = False
if isinstance(self.obj, cudf.Series):
if self.obj._column.has_nulls():
has_nulls = True
else:
for col in self.obj._data:
if self.obj[col].has_nulls:
has_nulls = True
if has_nulls:
raise NotImplementedError(
"Handling UDF with null values is not yet supported"
)
return self._apply_agg(func)
def _normalize(self):
"""
Normalize the *window* and *min_periods* args
*window* can be:
* An integer, in which case it is the window size.
If *min_periods* is unspecified, it is set to be equal to
the window size.
* A timedelta offset, in which case it is used to generate
a column of window sizes to use for each element.
If *min_periods* is unspecified, it is set to 1.
Only valid for datetime index.
"""
window, min_periods = self.window, self.min_periods
if is_number(window):
# only allow integers
if not is_integer(window):
raise ValueError("window must be an integer")
if window <= 0:
raise ValueError("window cannot be zero or negative")
if self.min_periods is None:
min_periods = window
else:
if isinstance(
window, (numba.cuda.devicearray.DeviceNDArray, BaseIndexer)
):
# window is a device_array of window sizes or BaseIndexer
self.window = window
self.min_periods = min_periods
return
if not isinstance(self.obj.index, cudf.core.index.DatetimeIndex):
raise ValueError(
"window must be an integer for non datetime index"
)
self._time_window = True
try:
window = pd.to_timedelta(window)
# to_timedelta will also convert np.arrays etc.,
if not isinstance(window, pd.Timedelta):
raise ValueError
window = window.to_timedelta64()
except ValueError as e:
raise ValueError(
"window must be integer or convertible to a timedelta"
) from e
if self.min_periods is None:
min_periods = 1
self.window = self._window_to_window_sizes(window)
self.min_periods = min_periods
def _window_to_window_sizes(self, window):
"""
For non-fixed width windows,
convert the window argument into window sizes.
"""
if is_integer(window):
return window
else:
with acquire_spill_lock():
return cudautils.window_sizes_from_offset(
self.obj.index._values.data_array_view(mode="write"),
window,
)
def __repr__(self):
return "{} [window={},min_periods={},center={}]".format(
self.__class__.__name__, self.window, self.min_periods, self.center
)
class RollingGroupby(Rolling):
"""
Grouped rolling window calculation.
See Also
--------
cudf.core.window.Rolling
"""
def __init__(self, groupby, window, min_periods=None, center=False):
sort_order = groupby.grouping.keys.argsort()
# TODO: there may be overlap between the columns
# of `groupby.grouping.keys` and `groupby.obj`.
# As an optimization, avoid gathering those twice.
self._group_keys = groupby.grouping.keys.take(sort_order)
obj = groupby.obj.drop(columns=groupby.grouping._named_columns).take(
sort_order
)
gb_size = groupby.size().sort_index()
self._group_starts = (
gb_size.cumsum().shift(1).fillna(0).repeat(gb_size)
)
super().__init__(obj, window, min_periods=min_periods, center=center)
@acquire_spill_lock()
def _window_to_window_sizes(self, window):
if is_integer(window):
return cudautils.grouped_window_sizes_from_offset(
column.arange(len(self.obj)).data_array_view(mode="read"),
self._group_starts,
window,
)
else:
return cudautils.grouped_window_sizes_from_offset(
self.obj.index._values.data_array_view(mode="read"),
self._group_starts,
window,
)
def _apply_agg(self, agg_name):
if agg_name == "count" and not self._time_window:
self.min_periods = 0
index = cudf.MultiIndex.from_frame(
cudf.DataFrame(
{
key: value
for key, value in itertools.chain(
self._group_keys._data.items(),
self.obj.index._data.items(),
)
}
)
)
result = super()._apply_agg(agg_name)
result.index = index
return result
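# Illustrative sketch (assumption): ``RollingGroupby`` is normally constructed
# via ``GroupBy.rolling``; windows are bounded by the per-group start offsets
# computed above, so they never cross group boundaries.
def _example_grouped_rolling_sum():
    import cudf

    df = cudf.DataFrame({"g": [0, 0, 0, 1, 1], "x": [1, 2, 3, 10, 20]})
    # rolling sum of length 2, restarted within each group of ``g``
    return df.groupby("g").rolling(2).sum()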
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/window/__init__.py
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION
from cudf.core.window.rolling import Rolling
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/_internals/where.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
import warnings
from typing import Tuple, Union
import numpy as np
import cudf
from cudf._typing import ScalarLike
from cudf.api.types import (
_is_non_decimal_numeric_dtype,
is_bool_dtype,
is_categorical_dtype,
is_scalar,
)
from cudf.core.column import ColumnBase
from cudf.utils.dtypes import (
_can_cast,
_dtype_can_hold_element,
find_common_type,
is_mixed_with_object_dtype,
)
def _normalize_categorical(input_col, other):
if isinstance(input_col, cudf.core.column.CategoricalColumn):
if cudf.api.types.is_scalar(other):
try:
other = input_col._encode(other)
except ValueError:
# When other is not present in categories,
# fill with Null.
other = None
other = cudf.Scalar(other, dtype=input_col.codes.dtype)
elif isinstance(other, cudf.core.column.CategoricalColumn):
other = other.codes
input_col = input_col.codes
return input_col, other
def _check_and_cast_columns_with_other(
source_col: ColumnBase,
other: Union[ScalarLike, ColumnBase],
inplace: bool,
) -> Tuple[ColumnBase, Union[ScalarLike, ColumnBase]]:
# Returns type-casted `source_col` & `other` based on `inplace`.
source_dtype = source_col.dtype
if is_categorical_dtype(source_dtype):
return _normalize_categorical(source_col, other)
other_is_scalar = is_scalar(other)
if other_is_scalar:
if (isinstance(other, float) and not np.isnan(other)) and (
source_dtype.type(other) != other
):
raise TypeError(
f"Cannot safely cast non-equivalent "
f"{type(other).__name__} to {source_dtype.name}"
)
if cudf.utils.utils.is_na_like(other):
return _normalize_categorical(
source_col, cudf.Scalar(other, dtype=source_dtype)
)
mixed_err = (
"cudf does not support mixed types, please type-cast the column of "
"dataframe/series and other to same dtypes."
)
if inplace:
other = cudf.Scalar(other) if other_is_scalar else other
if is_mixed_with_object_dtype(other, source_col):
raise TypeError(mixed_err)
if not _can_cast(other.dtype, source_dtype):
warnings.warn(
f"Type-casting from {other.dtype} "
f"to {source_dtype}, there could be potential data loss"
)
return _normalize_categorical(source_col, other.astype(source_dtype))
if _is_non_decimal_numeric_dtype(source_dtype) and _can_cast(
other, source_dtype
):
common_dtype = source_dtype
elif (
isinstance(source_col, cudf.core.column.NumericalColumn)
and other_is_scalar
and _dtype_can_hold_element(source_dtype, other)
):
common_dtype = source_dtype
else:
common_dtype = find_common_type(
[
source_dtype,
np.min_scalar_type(other) if other_is_scalar else other.dtype,
]
)
if other_is_scalar:
other = cudf.Scalar(other)
if is_mixed_with_object_dtype(other, source_col) or (
is_bool_dtype(source_col) and not is_bool_dtype(common_dtype)
):
raise TypeError(mixed_err)
other = other.astype(common_dtype)
return _normalize_categorical(source_col.astype(common_dtype), other)
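# Illustrative sketch (assumption): this casting logic backs the public
# ``where``/``mask`` APIs. A scalar replacement that the source dtype can hold
# keeps that dtype, while mixing, say, strings into a numeric column raises
# the ``mixed_err`` TypeError above.
def _example_where_casting():
    import cudf

    s = cudf.Series([1, 2, 3], dtype="int64")
    # 0 fits in int64, so the result stays int64
    return s.where(s > 1, 0)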
def _make_categorical_like(result, column):
if isinstance(column, cudf.core.column.CategoricalColumn):
result = cudf.core.column.build_categorical_column(
categories=column.categories,
codes=cudf.core.column.build_column(
result.base_data, dtype=result.dtype
),
mask=result.base_mask,
size=result.size,
offset=result.offset,
ordered=column.ordered,
)
return result
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/_internals/timezones.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import os
import zoneinfo
from functools import lru_cache
from typing import Tuple, cast
import numpy as np
import pandas as pd
import cudf
from cudf._lib.labeling import label_bins
from cudf._lib.search import search_sorted
from cudf._lib.timezone import make_timezone_transition_table
from cudf.core.column.column import as_column, build_column
from cudf.core.column.datetime import DatetimeColumn, DatetimeTZColumn
from cudf.core.dataframe import DataFrame
from cudf.utils.dtypes import _get_base_dtype
@lru_cache(maxsize=20)
def get_tz_data(zone_name):
"""
Return timezone data (transition times and UTC offsets) for the
given IANA time zone.
Parameters
----------
zone_name: str
IANA time zone name
Returns
-------
DataFrame with two columns containing the transition times
("transition_times") and corresponding UTC offsets ("offsets").
"""
try:
# like zoneinfo, we first look in TZPATH
tz_table = _find_and_read_tzfile_tzpath(zone_name)
except zoneinfo.ZoneInfoNotFoundError:
# if that fails, we fall back to using `tzdata`
tz_table = _find_and_read_tzfile_tzdata(zone_name)
return tz_table
def _find_and_read_tzfile_tzpath(zone_name):
for search_path in zoneinfo.TZPATH:
if os.path.isfile(os.path.join(search_path, zone_name)):
return _read_tzfile_as_frame(search_path, zone_name)
raise zoneinfo.ZoneInfoNotFoundError(zone_name)
def _find_and_read_tzfile_tzdata(zone_name):
import importlib.resources
package_base = "tzdata.zoneinfo"
try:
return _read_tzfile_as_frame(
str(importlib.resources.files(package_base)), zone_name
)
# TODO: make it so that the call to libcudf raises a
# FileNotFoundError instead of a RuntimeError
except (ImportError, FileNotFoundError, UnicodeEncodeError, RuntimeError):
# the "except" part of this try-except is basically vendored
# from the zoneinfo library.
#
# There are three types of exception that can be raised that all amount
# to "we cannot find this key":
#
# ImportError: If package_name doesn't exist (e.g. if tzdata is not
# installed, or if there's an error in the folder name like
# Amrica/New_York)
# FileNotFoundError: If resource_name doesn't exist in the package
# (e.g. Europe/Krasnoy)
# UnicodeEncodeError: If package_name or resource_name are not UTF-8,
# such as keys containing a surrogate character.
raise zoneinfo.ZoneInfoNotFoundError(zone_name)
def _read_tzfile_as_frame(tzdir, zone_name):
transition_times_and_offsets = make_timezone_transition_table(
tzdir, zone_name
)
if not transition_times_and_offsets:
# this happens for UTC-like zones
min_date = np.int64(np.iinfo("int64").min + 1).astype("M8[s]")
transition_times_and_offsets = as_column([min_date]), as_column(
[np.timedelta64(0, "s")]
)
return DataFrame._from_columns(
transition_times_and_offsets, ["transition_times", "offsets"]
)
def _find_ambiguous_and_nonexistent(
data: DatetimeColumn, zone_name: str
) -> Tuple:
"""
Recognize ambiguous and nonexistent timestamps for the given timezone.
Returns a tuple of columns, both of "bool" dtype and of the same
size as `data`, that respectively indicate ambiguous and
nonexistent timestamps in `data` with the value `True`.
Ambiguous and/or nonexistent timestamps are only possible if any
transitions occur in the time zone database for the given timezone.
If no transitions occur, the tuple `(False, False)` is returned.
"""
tz_data_for_zone = get_tz_data(zone_name)
transition_times = tz_data_for_zone["transition_times"]
offsets = tz_data_for_zone["offsets"].astype(
f"timedelta64[{data._time_unit}]"
)
if len(offsets) == 1: # no transitions
return False, False
transition_times, offsets, old_offsets = (
transition_times[1:]._column,
offsets[1:]._column,
offsets[:-1]._column,
)
# Assume we have two clocks at the moment of transition:
# - Clock 1 is turned forward or backwards correctly
# - Clock 2 makes no changes
clock_1 = transition_times + offsets
clock_2 = transition_times + old_offsets
# At the start of an ambiguous time period, Clock 1 (which has
# been turned back) reads less than Clock 2:
cond = clock_1 < clock_2
ambiguous_begin = clock_1.apply_boolean_mask(cond)
# The end of an ambiguous time period is what Clock 2 reads at
# the moment of transition:
ambiguous_end = clock_2.apply_boolean_mask(cond)
ambiguous = label_bins(
data,
left_edges=ambiguous_begin,
left_inclusive=True,
right_edges=ambiguous_end,
right_inclusive=False,
).notnull()
# At the start of a non-existent time period, Clock 2 reads less
# than Clock 1 (which has been turned forward):
cond = clock_1 > clock_2
nonexistent_begin = clock_2.apply_boolean_mask(cond)
# The end of the non-existent time period is what Clock 1 reads
# at the moment of transition:
nonexistent_end = clock_1.apply_boolean_mask(cond)
nonexistent = label_bins(
data,
left_edges=nonexistent_begin,
left_inclusive=True,
right_edges=nonexistent_end,
right_inclusive=False,
).notnull()
return ambiguous, nonexistent
def localize(
data: DatetimeColumn, zone_name: str, ambiguous, nonexistent
) -> DatetimeTZColumn:
if ambiguous != "NaT":
raise NotImplementedError(
"Only ambiguous='NaT' is currently supported"
)
if nonexistent != "NaT":
raise NotImplementedError(
"Only nonexistent='NaT' is currently supported"
)
if isinstance(data, DatetimeTZColumn):
raise ValueError(
"Already localized. "
"Use `tz_convert` to convert between time zones."
)
dtype = pd.DatetimeTZDtype(data._time_unit, zone_name)
ambiguous, nonexistent = _find_ambiguous_and_nonexistent(data, zone_name)
localized = cast(
DatetimeColumn,
data._scatter_by_column(
data.isnull() | (ambiguous | nonexistent),
cudf.Scalar(cudf.NaT, dtype=data.dtype),
),
)
gmt_data = local_to_utc(localized, zone_name)
return cast(
DatetimeTZColumn,
build_column(
data=gmt_data.base_data,
dtype=dtype,
mask=localized.base_mask,
size=gmt_data.size,
offset=gmt_data.offset,
),
)
def delocalize(data: DatetimeColumn) -> DatetimeColumn:
"""
Convert a timezone-aware datetime column to a timezone-naive one.
If the column is already timezone-naive, return it as is.
"""
if isinstance(data, DatetimeTZColumn):
return data._local_time
# already timezone-naive:
return data
def convert(data: DatetimeTZColumn, zone_name: str) -> DatetimeTZColumn:
if not isinstance(data, DatetimeTZColumn):
raise TypeError(
"Cannot convert from timezone-naive timestamps to "
"timezone-aware timestamps. For that, "
"use `tz_localize`."
)
if zone_name == str(data.dtype.tz):
return data.copy()
utc_time = data._utc_time
out = cast(
DatetimeTZColumn,
build_column(
data=utc_time.base_data,
dtype=pd.DatetimeTZDtype(data._time_unit, zone_name),
mask=utc_time.base_mask,
size=utc_time.size,
offset=utc_time.offset,
),
)
return out
def utc_to_local(data: DatetimeColumn, zone_name: str) -> DatetimeColumn:
tz_data_for_zone = get_tz_data(zone_name)
transition_times, offsets = tz_data_for_zone._columns
transition_times = transition_times.astype(_get_base_dtype(data.dtype))
indices = search_sorted([transition_times], [data], "right") - 1
offsets_from_utc = offsets.take(indices, nullify=True)
return data + offsets_from_utc
def local_to_utc(data: DatetimeColumn, zone_name: str) -> DatetimeColumn:
tz_data_for_zone = get_tz_data(zone_name)
transition_times, offsets = tz_data_for_zone._columns
transition_times_local = (transition_times + offsets).astype(data.dtype)
indices = search_sorted([transition_times_local], [data], "right") - 1
offsets_to_utc = offsets.take(indices, nullify=True)
return data - offsets_to_utc
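# Illustrative sketch (assumption): these helpers back the public
# ``tz_localize``/``tz_convert`` APIs on datetime data. Ambiguous or
# nonexistent wall-clock times around a DST transition come back as NaT,
# matching the ``ambiguous="NaT"``/``nonexistent="NaT"`` behaviour above.
def _example_tz_localize_convert():
    import cudf

    s = cudf.Series(
        ["2023-03-12 01:30:00", "2023-03-12 03:30:00"]
    ).astype("datetime64[ns]")
    localized = s.dt.tz_localize("America/New_York")
    return localized.dt.tz_convert("UTC")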
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/_internals/expressions.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
import ast
import functools
from typing import List, Tuple
from cudf._lib.expressions import (
ASTOperator,
ColumnReference,
Expression,
Literal,
Operation,
)
# This dictionary encodes the mapping from Python AST operators to their cudf
# counterparts.
python_cudf_operator_map = {
# Binary operators
ast.Add: ASTOperator.ADD,
ast.Sub: ASTOperator.SUB,
ast.Mult: ASTOperator.MUL,
ast.Div: ASTOperator.DIV,
ast.FloorDiv: ASTOperator.FLOOR_DIV,
ast.Mod: ASTOperator.PYMOD,
ast.Pow: ASTOperator.POW,
ast.Eq: ASTOperator.EQUAL,
ast.NotEq: ASTOperator.NOT_EQUAL,
ast.Lt: ASTOperator.LESS,
ast.Gt: ASTOperator.GREATER,
ast.LtE: ASTOperator.LESS_EQUAL,
ast.GtE: ASTOperator.GREATER_EQUAL,
ast.BitXor: ASTOperator.BITWISE_XOR,
# TODO: The mapping of logical/bitwise operators here is inconsistent with
# pandas. In pandas, both `BitAnd` and `And` map to
# `ASTOperator.LOGICAL_AND` for booleans, while they map to
# `ASTOperator.BITWISE_AND` for integers. However, there is no good way to
# encode this at present because expressions can be arbitrarily nested so
# we won't know the dtype of the input without inserting a much more
# complex traversal of the expression tree to determine the output types at
# each node. For now, we'll rely on users to use the appropriate operator.
ast.BitAnd: ASTOperator.BITWISE_AND,
ast.BitOr: ASTOperator.BITWISE_OR,
ast.And: ASTOperator.LOGICAL_AND,
ast.Or: ASTOperator.LOGICAL_OR,
# Unary operators
ast.Invert: ASTOperator.BIT_INVERT,
ast.Not: ASTOperator.NOT,
# TODO: Missing USub, possibly other unary ops?
}
# Mapping between Python function names encoded in an ast.Call node and the
# corresponding libcudf C++ AST operators.
python_cudf_function_map = {
# TODO: Operators listed on
# https://pandas.pydata.org/pandas-docs/stable/user_guide/enhancingperf.html#expression-evaluation-via-eval # noqa: E501
# that we don't support yet:
# expm1, log1p, arctan2 and log10.
"isnull": ASTOperator.IS_NULL,
"isna": ASTOperator.IS_NULL,
"sin": ASTOperator.SIN,
"cos": ASTOperator.COS,
"tan": ASTOperator.TAN,
"arcsin": ASTOperator.ARCSIN,
"arccos": ASTOperator.ARCCOS,
"arctan": ASTOperator.ARCTAN,
"sinh": ASTOperator.SINH,
"cosh": ASTOperator.COSH,
"tanh": ASTOperator.TANH,
"arcsinh": ASTOperator.ARCSINH,
"arccosh": ASTOperator.ARCCOSH,
"arctanh": ASTOperator.ARCTANH,
"exp": ASTOperator.EXP,
"log": ASTOperator.LOG,
"sqrt": ASTOperator.SQRT,
"abs": ASTOperator.ABS,
"ceil": ASTOperator.CEIL,
"floor": ASTOperator.FLOOR,
# TODO: Operators supported by libcudf with no Python function analog.
# ast.rint: ASTOperator.RINT,
# ast.cbrt: ASTOperator.CBRT,
}
class libcudfASTVisitor(ast.NodeVisitor):
"""A NodeVisitor specialized for constructing a libcudf expression tree.
This visitor is designed to handle AST nodes that have libcudf equivalents.
It constructs column references from names and literals from constants,
then builds up operations. The final result can be accessed using the
`expression` property. The visitor must be kept in scope for as long as the
expression is needed because all of the underlying libcudf expressions will
be destroyed when the libcudfASTVisitor is.
Parameters
----------
col_names : Tuple[str]
The column names used to map the names in an expression.
"""
def __init__(self, col_names: Tuple[str]):
self.stack: List[Expression] = []
self.nodes: List[Expression] = []
self.col_names = col_names
@property
def expression(self):
"""Expression: The result of parsing an AST."""
assert len(self.stack) == 1
return self.stack[-1]
def visit_Name(self, node):
try:
col_id = self.col_names.index(node.id)
except ValueError:
raise ValueError(f"Unknown column name {node.id}")
self.stack.append(ColumnReference(col_id))
def visit_Constant(self, node):
if not isinstance(node, (ast.Num, ast.Str)):
raise ValueError(
f"Unsupported literal {repr(node.value)} of type "
"{type(node.value).__name__}"
)
self.stack.append(Literal(node.value))
def visit_UnaryOp(self, node):
self.visit(node.operand)
self.nodes.append(self.stack.pop())
if isinstance(node.op, ast.USub):
# TODO: Except for leaf nodes, we won't know the type of the
# operand, so there's no way to know whether this should be a float
# or an int. We should maybe see what Spark does, and this will
# probably require casting.
self.nodes.append(Literal(-1))
op = ASTOperator.MUL
self.stack.append(Operation(op, self.nodes[-1], self.nodes[-2]))
elif isinstance(node.op, ast.UAdd):
self.stack.append(self.nodes[-1])
else:
op = python_cudf_operator_map[type(node.op)]
self.stack.append(Operation(op, self.nodes[-1]))
def visit_BinOp(self, node):
self.visit(node.left)
self.visit(node.right)
self.nodes.append(self.stack.pop())
self.nodes.append(self.stack.pop())
op = python_cudf_operator_map[type(node.op)]
self.stack.append(Operation(op, self.nodes[-1], self.nodes[-2]))
def _visit_BoolOp_Compare(self, operators, operands, has_multiple_ops):
# Helper function handling the common components of parsing BoolOp and
# Compare AST nodes. These two types of nodes both support chaining
# (e.g. `a > b > c` is equivalent to `a > b and b > c`), so this
# function helps standardize that.
# TODO: Whether And/Or and BitAnd/BitOr actually correspond to
# logical or bitwise operators depends on the data types that they
# are applied to. We'll need to add logic to map to that.
inner_ops = []
for op, (left, right) in zip(operators, operands):
# Note that this will lead to duplicate nodes, e.g. if
# the comparison is `a < b < c` that will be encoded as
# `a < b and b < c`. We could potentially optimize by caching
# expressions by name so that we only construct them once.
self.visit(left)
self.visit(right)
self.nodes.append(self.stack.pop())
self.nodes.append(self.stack.pop())
op = python_cudf_operator_map[type(op)]
inner_ops.append(Operation(op, self.nodes[-1], self.nodes[-2]))
self.nodes.extend(inner_ops)
# If we have more than one comparator, we need to link them
# together with LOGICAL_AND operators.
if has_multiple_ops:
op = ASTOperator.LOGICAL_AND
def _combine_compare_ops(left, right):
self.nodes.append(Operation(op, left, right))
return self.nodes[-1]
functools.reduce(_combine_compare_ops, inner_ops)
self.stack.append(self.nodes[-1])
def visit_BoolOp(self, node):
operators = [node.op] * (len(node.values) - 1)
operands = zip(node.values[:-1], node.values[1:])
self._visit_BoolOp_Compare(operators, operands, len(node.values) > 2)
def visit_Compare(self, node):
operands = (node.left, *node.comparators)
has_multiple_ops = len(operands) > 2
operands = zip(operands[:-1], operands[1:])
self._visit_BoolOp_Compare(node.ops, operands, has_multiple_ops)
def visit_Call(self, node):
try:
op = python_cudf_function_map[node.func.id]
except KeyError:
raise ValueError(f"Unsupported function {node.func}.")
# Assuming only unary functions are supported, which is checked above.
if len(node.args) != 1 or node.keywords:
raise ValueError(
f"Function {node.func} only accepts one positional "
"argument."
)
self.visit(node.args[0])
self.nodes.append(self.stack.pop())
self.stack.append(Operation(op, self.nodes[-1]))
@functools.lru_cache(256)
def parse_expression(expr: str, col_names: Tuple[str]):
visitor = libcudfASTVisitor(col_names)
visitor.visit(ast.parse(expr))
return visitor
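# A minimal usage sketch (assumes a GPU and libcudf are available): parsing an
# expression against a frame with columns "a" and "b" might look like
#
#     visitor = parse_expression("a + b > 2", ("a", "b"))
#     expr = visitor.expression
#
# The visitor must stay alive for as long as `expr` is used, since the
# underlying libcudf expressions are destroyed together with the visitor.
# Thanks to `functools.lru_cache`, repeated calls with the same
# (expr, col_names) pair return the cached visitor instead of re-parsing.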
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/_internals/__init__.py
|
# Copyright (c) 2021, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/buffer/spillable_buffer.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from __future__ import annotations
import collections.abc
import pickle
import time
import weakref
from threading import RLock
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple
import numpy
from typing_extensions import Self
import rmm
from cudf.core.buffer.buffer import (
Buffer,
cuda_array_interface_wrapper,
get_ptr_and_size,
host_memory_allocation,
)
from cudf.utils.nvtx_annotation import _get_color_for_nvtx, annotate
from cudf.utils.string import format_bytes
if TYPE_CHECKING:
from cudf.core.buffer.spill_manager import SpillManager
def get_spillable_owner(data) -> Optional[SpillableBuffer]:
"""Get the spillable owner of `data`, if any exist
Search through the stack of data owners in order to find an
owner of type `SpillableBuffer` (not subclasses).
Parameters
----------
data : buffer-like or array-like
A buffer-like or array-like object that represents C-contiguous memory.
Return
------
SpillableBuffer or None
The owner of `data` if spillable or None.
"""
if type(data) is SpillableBuffer:
return data
if hasattr(data, "owner"):
return get_spillable_owner(data.owner)
return None
def as_spillable_buffer(data, exposed: bool) -> SpillableBuffer:
"""Factory function to wrap `data` in a SpillableBuffer object.
If `data` isn't a buffer already, a new buffer that points to the memory of
`data` is created. If `data` represents host memory, it is copied to a new
`rmm.DeviceBuffer` device allocation. Otherwise, the memory of `data` is
**not** copied, instead the new buffer keeps a reference to `data` in order
to retain its lifetime.
If `data` is owned by a spillable buffer, a "slice" of the buffer is
returned. In this case, the spillable buffer must either be "exposed" or
spill locked (i.e., called within an acquire_spill_lock context). This is to
guarantee that the memory of `data` isn't spilled before this function gets
to calculate the offset of the new slice.
It is illegal for a spillable buffer to own another spillable buffer.
Parameters
----------
data : buffer-like or array-like
A buffer-like or array-like object that represents C-contiguous memory.
exposed : bool, optional
Mark the buffer as permanently exposed (unspillable).
Return
------
SpillableBuffer
A spillable buffer instance that represents the device memory of `data`.
"""
from cudf.core.buffer.utils import get_spill_lock
if not hasattr(data, "__cuda_array_interface__"):
if exposed:
raise ValueError("cannot created exposed host memory")
return SpillableBuffer._from_host_memory(data)
spillable_owner = get_spillable_owner(data)
if spillable_owner is None:
return SpillableBuffer._from_device_memory(data, exposed=exposed)
if not spillable_owner.exposed and get_spill_lock() is None:
raise ValueError(
"A owning spillable buffer must "
"either be exposed or spilled locked."
)
# At this point, we know that `data` is owned by a spillable buffer,
# which is exposed or spill locked.
ptr, size = get_ptr_and_size(data.__cuda_array_interface__)
base_ptr = spillable_owner.memory_info()[0]
return SpillableBufferSlice(
spillable_owner, offset=ptr - base_ptr, size=size
)
class SpillLock:
pass
class DelayedPointerTuple(collections.abc.Sequence):
"""
A delayed version of the "data" field in __cuda_array_interface__.
The idea is to delay the access to `Buffer.ptr` until the user
actually accesses the data pointer.
For instance, in many cases __cuda_array_interface__ is accessed
only to determine whether an object is a CUDA object or not.
TODO: this doesn't support libraries such as PyTorch that declare
the tuple of __cuda_array_interface__["data"] in Cython. In such
cases, Cython will raise an error because DelayedPointerTuple
isn't a "real" tuple.
"""
def __init__(self, buffer) -> None:
self._buf = buffer
def __len__(self):
return 2
def __getitem__(self, i):
if i == 0:
return self._buf.get_ptr(mode="write")
elif i == 1:
return False
raise IndexError("tuple index out of range")
class SpillableBuffer(Buffer):
"""A Buffer that supports spilling memory off the GPU to avoid OOMs.
This buffer supports spilling the represented data to host memory.
Spilling can be done manually by calling `.spill(target="cpu")` but
usually the associated spilling manager triggers spilling based on current
device memory usage; see `cudf.core.buffer.spill_manager.SpillManager`.
Unspill is triggered automatically when accessing the data of the buffer.
The buffer might not be spillable, which is based on the "expose" status
of the buffer. We say that the buffer has been exposed if the device
pointer (integer or void*) has been accessed outside of SpillableBuffer.
In this case, we cannot invalidate the device pointer by moving the data
to host.
A buffer can be exposed permanently at creation or by accessing the `.ptr`
property. To avoid this, one can use `.get_ptr()` instead, which supports
exposing the buffer temporarily.
Use the factory function `as_buffer` to create a SpillableBuffer instance.
"""
lock: RLock
_spill_locks: weakref.WeakSet
_last_accessed: float
_ptr_desc: Dict[str, Any]
_exposed: bool
_manager: SpillManager
def _finalize_init(self, ptr_desc: Dict[str, Any], exposed: bool) -> None:
"""Finish initialization of the spillable buffer
This implements the common initialization that `_from_device_memory`
and `_from_host_memory` are missing.
Parameters
----------
ptr_desc : dict
Description of the memory.
exposed : bool, optional
Mark the buffer as permanently exposed (unspillable).
"""
from cudf.core.buffer.spill_manager import get_global_manager
self.lock = RLock()
self._spill_locks = weakref.WeakSet()
self._last_accessed = time.monotonic()
self._ptr_desc = ptr_desc
self._exposed = exposed
manager = get_global_manager()
if manager is None:
raise ValueError(
f"cannot create {self.__class__} without "
"a global spill manager"
)
self._manager = manager
self._manager.add(self)
@classmethod
def _from_device_memory(cls, data: Any, *, exposed: bool = False) -> Self:
"""Create a spillabe buffer from device memory.
No data is being copied.
Parameters
----------
data : device-buffer-like
An object implementing the CUDA Array Interface.
exposed : bool, optional
Mark the buffer as permanently exposed (unspillable).
Returns
-------
SpillableBuffer
Buffer representing the same device memory as `data`
"""
ret = super()._from_device_memory(data)
ret._finalize_init(ptr_desc={"type": "gpu"}, exposed=exposed)
return ret
@classmethod
def _from_host_memory(cls, data: Any) -> Self:
"""Create a spillabe buffer from host memory.
Data must implement `__array_interface__`, the buffer protocol, and/or
be convertible to a buffer object using `numpy.array()`
The new buffer is marked as spilled to host memory already.
Raises ValueError if array isn't C-contiguous.
Parameters
----------
data : Any
An object that represents host memory.
Returns
-------
SpillableBuffer
Buffer representing a copy of `data`.
"""
# Convert to a memoryview using numpy array, this will not copy data
# in most cases.
data = memoryview(numpy.array(data, copy=False, subok=True))
if not data.c_contiguous:
raise ValueError("Buffer data must be C-contiguous")
data = data.cast("B") # Make sure itemsize==1
# Create an already spilled buffer
ret = cls.__new__(cls)
ret._owner = None
ret._ptr = 0
ret._size = data.nbytes
ret._finalize_init(
ptr_desc={"type": "cpu", "memoryview": data}, exposed=False
)
return ret
@property
def is_spilled(self) -> bool:
return self._ptr_desc["type"] != "gpu"
def copy(self, deep: bool = True) -> Self:
spill_lock = SpillLock()
self.spill_lock(spill_lock=spill_lock)
return super().copy(deep=deep)
def spill(self, target: str = "cpu") -> None:
"""Spill or un-spill this buffer in-place
Parameters
----------
target : str
The target of the spilling.
"""
time_start = time.perf_counter()
with self.lock:
ptr_type = self._ptr_desc["type"]
if ptr_type == target:
return
if not self.spillable:
raise ValueError(
f"Cannot in-place move an unspillable buffer: {self}"
)
if (ptr_type, target) == ("gpu", "cpu"):
with annotate(
message="SpillDtoH",
color=_get_color_for_nvtx("SpillDtoH"),
domain="cudf_python-spill",
):
host_mem = host_memory_allocation(self.size)
rmm._lib.device_buffer.copy_ptr_to_host(
self._ptr, host_mem
)
self._ptr_desc["memoryview"] = host_mem
self._ptr = 0
self._owner = None
elif (ptr_type, target) == ("cpu", "gpu"):
# Notice, this operation is prone to deadlock because the RMM
# allocation might trigger spilling-on-demand which in turn
# trigger a new call to this buffer's `spill()`.
# Therefore, it is important that spilling-on-demand doesn't
# try to unspill an already locked buffer!
with annotate(
message="SpillHtoD",
color=_get_color_for_nvtx("SpillHtoD"),
domain="cudf_python-spill",
):
dev_mem = rmm.DeviceBuffer.to_device(
self._ptr_desc.pop("memoryview")
)
self._ptr = dev_mem.ptr
self._owner = dev_mem
assert self._size == dev_mem.size
else:
# TODO: support moving to disk
raise ValueError(f"Unknown target: {target}")
self._ptr_desc["type"] = target
time_end = time.perf_counter()
self._manager.statistics.log_spill(
src=ptr_type,
dst=target,
nbytes=self.size,
time=time_end - time_start,
)
def mark_exposed(self) -> None:
"""Mark the buffer as "exposed" and make it unspillable permanently.
This also unspills the buffer (unspillable buffers cannot be spilled!).
"""
self._manager.spill_to_device_limit()
with self.lock:
if not self._exposed:
self._manager.statistics.log_expose(self)
self.spill(target="gpu")
self._exposed = True
self._last_accessed = time.monotonic()
def spill_lock(self, spill_lock: SpillLock) -> None:
"""Spill lock the buffer
Mark the buffer as unspillable while `spill_lock` is alive,
which is tracked by monitoring a weakref to `spill_lock`.
Parameters
----------
spill_lock : SpillLock
The object that defines the scope of the lock.
"""
with self.lock:
self.spill(target="gpu")
self._spill_locks.add(spill_lock)
def get_ptr(self, *, mode: Literal["read", "write"]) -> int:
"""Get a device pointer to the memory of the buffer.
If this is called within an `acquire_spill_lock` context,
a reference to this buffer is added to spill_lock, which
disables spilling of this buffer while in the context.
If this is *not* called within an `acquire_spill_lock` context,
this buffer is marked as unspillable permanently.
Returns
-------
int
The device pointer as an integer
"""
from cudf.core.buffer.utils import get_spill_lock
spill_lock = get_spill_lock()
if spill_lock is None:
self.mark_exposed()
else:
self.spill_lock(spill_lock)
self._last_accessed = time.monotonic()
return self._ptr
def memory_info(self) -> Tuple[int, int, str]:
"""Get pointer, size, and device type of this buffer.
Warning, it is not safe to access the pointer value without
spill locking the buffer manually. This method neither exposes
nor spill locks the buffer.
Return
------
int
The memory pointer as an integer (device or host memory)
int
The size of the memory in bytes
str
The device type as a string ("cpu" or "gpu")
"""
if self._ptr_desc["type"] == "gpu":
ptr = self._ptr
elif self._ptr_desc["type"] == "cpu":
ptr = numpy.array(
self._ptr_desc["memoryview"], copy=False
).__array_interface__["data"][0]
return (ptr, self.nbytes, self._ptr_desc["type"])
@property
def owner(self) -> Any:
return self._owner
@property
def exposed(self) -> bool:
return self._exposed
@property
def spillable(self) -> bool:
return not self._exposed and len(self._spill_locks) == 0
@property
def size(self) -> int:
return self._size
@property
def nbytes(self) -> int:
return self._size
@property
def last_accessed(self) -> float:
return self._last_accessed
@property
def __cuda_array_interface__(self) -> dict:
return {
"data": DelayedPointerTuple(self),
"shape": (self.size,),
"strides": None,
"typestr": "|u1",
"version": 0,
}
def memoryview(
self, *, offset: int = 0, size: Optional[int] = None
) -> memoryview:
size = self._size if size is None else size
with self.lock:
if self.spillable:
self.spill(target="cpu")
return self._ptr_desc["memoryview"][offset : offset + size]
else:
assert self._ptr_desc["type"] == "gpu"
ret = host_memory_allocation(size)
rmm._lib.device_buffer.copy_ptr_to_host(
self._ptr + offset, ret
)
return ret
def _getitem(self, offset: int, size: int) -> SpillableBufferSlice:
return SpillableBufferSlice(base=self, offset=offset, size=size)
def serialize(self) -> Tuple[dict, list]:
"""Serialize the Buffer
Normally, we would use `[self]` as the frames. This would work but
also mean that `self` becomes exposed permanently if the frames are
later accessed through `__cuda_array_interface__`, which is exactly
what libraries like Dask+UCX would do when communicating!
The sound solution is to modify Dask et al. so that they access the
frames through `.get_ptr()` and hold on to the `spill_lock` until
the frame has been transferred. However, until this adaptation we
use a hack where the frame is a `Buffer` with a `spill_lock` as the
owner, which makes `self` unspillable while the frame is alive but
doesn't expose `self` when `__cuda_array_interface__` is accessed.
Warning, this hack means that the returned frame must be copied before
being given to `.deserialize()`, otherwise we would have a `Buffer` pointing
to memory already owned by an existing `SpillableBuffer`.
"""
header: Dict[Any, Any]
frames: List[Buffer | memoryview]
with self.lock:
header = {}
header["type-serialized"] = pickle.dumps(self.__class__)
header["frame_count"] = 1
if self.is_spilled:
frames = [self.memoryview()]
else:
# TODO: Use `frames=[self]` instead of this hack, see doc above
spill_lock = SpillLock()
self.spill_lock(spill_lock)
ptr, size, _ = self.memory_info()
frames = [
Buffer._from_device_memory(
cuda_array_interface_wrapper(
ptr=ptr,
size=size,
owner=(self._owner, spill_lock),
)
)
]
return header, frames
def __repr__(self) -> str:
if self._ptr_desc["type"] != "gpu":
ptr_info = str(self._ptr_desc)
else:
ptr_info = str(hex(self._ptr))
return (
f"<SpillableBuffer size={format_bytes(self._size)} "
f"spillable={self.spillable} exposed={self.exposed} "
f"num-spill-locks={len(self._spill_locks)} "
f"ptr={ptr_info} owner={repr(self._owner)}>"
)
class SpillableBufferSlice(SpillableBuffer):
"""A slice of a spillable buffer
This buffer applies the slicing and then delegates all
operations to its base buffer.
Parameters
----------
base : SpillableBuffer
The base of the view
offset : int
Memory offset into the base buffer
size : int
Size of the view (in bytes)
"""
def __init__(self, base: SpillableBuffer, offset: int, size: int) -> None:
if size < 0:
raise ValueError("size cannot be negative")
if offset < 0:
raise ValueError("offset cannot be negative")
if offset + size > base.size:
raise ValueError(
"offset+size cannot be greater than the size of base"
)
self._base = base
self._offset = offset
self._size = size
self._owner = base
self.lock = base.lock
def get_ptr(self, *, mode: Literal["read", "write"]) -> int:
"""
A passthrough method to `SpillableBuffer.get_ptr`
with factoring in the `offset`.
"""
return self._base.get_ptr(mode=mode) + self._offset
def _getitem(self, offset: int, size: int) -> SpillableBufferSlice:
return SpillableBufferSlice(
base=self._base, offset=offset + self._offset, size=size
)
@classmethod
def deserialize(cls, header: dict, frames: list):
# TODO: because of the hack in `SpillableBuffer.serialize()` where
# frames are of type `Buffer`, we always deserialize as if they are
# `SpillableBuffer`. In the future, we should be able to
# deserialize into `SpillableBufferSlice` when the frames haven't been
# copied.
return SpillableBuffer.deserialize(header, frames)
def memoryview(
self, *, offset: int = 0, size: Optional[int] = None
) -> memoryview:
size = self._size if size is None else size
return self._base.memoryview(offset=self._offset + offset, size=size)
def __repr__(self) -> str:
return (
f"<SpillableBufferSlice size={format_bytes(self._size)} "
f"offset={format_bytes(self._offset)} of {self._base} "
)
# The rest of the methods delegate to the base buffer.
def spill(self, target: str = "cpu") -> None:
return self._base.spill(target=target)
@property
def is_spilled(self) -> bool:
return self._base.is_spilled
@property
def exposed(self) -> bool:
return self._base.exposed
@property
def spillable(self) -> bool:
return self._base.spillable
def spill_lock(self, spill_lock: SpillLock) -> None:
self._base.spill_lock(spill_lock=spill_lock)
def memory_info(self) -> Tuple[int, int, str]:
(ptr, _, device_type) = self._base.memory_info()
return (ptr + self._offset, self.nbytes, device_type)
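# A minimal sketch of manual spilling (assumes spilling is enabled, e.g. via
# CUDF_SPILL=on, so that a global spill manager exists):
#
#     import rmm
#     buf = SpillableBuffer._from_device_memory(rmm.DeviceBuffer(size=64))
#     buf.spill(target="cpu")   # data now lives in host memory
#     assert buf.is_spilled
#     buf.spill(target="gpu")   # unspill back to device memory
#
# In normal use, buffers are created through `cudf.core.buffer.as_buffer` and
# spilled by the manager automatically rather than by hand.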
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/buffer/buffer.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from __future__ import annotations
import math
import pickle
from types import SimpleNamespace
from typing import Any, Dict, Literal, Mapping, Optional, Sequence, Tuple
import numpy
from typing_extensions import Self
import rmm
import cudf
from cudf.core.abc import Serializable
from cudf.utils.string import format_bytes
def host_memory_allocation(nbytes: int) -> memoryview:
"""Allocate host memory using NumPy
This is an alternative to `bytearray` to avoid memory initialization cost.
A `bytearray` is zero-initialized using `calloc`, which we don't need.
Additionally, `numpy.empty` both skips the zero-initialization and uses
hugepages when available <https://github.com/numpy/numpy/pull/14216>.
Parameters
----------
nbytes : int
Size of the new host allocation in bytes.
Return
------
memoryview
The new host allocation.
"""
return numpy.empty((nbytes,), dtype="u1").data
def cuda_array_interface_wrapper(
ptr: int,
size: int,
owner: Optional[object] = None,
readonly=False,
typestr="|u1",
version=0,
):
"""Wrap device pointer in an object that exposes `__cuda_array_interface__`
See <https://numba.readthedocs.io/en/stable/cuda/cuda_array_interface.html>
Parameters
----------
ptr : int
An integer representing a pointer to device memory.
size : int, optional
Size of device memory in bytes.
owner : object, optional
Python object to which the lifetime of the memory allocation is tied.
A reference to this object is kept in the returned wrapper object.
readonly: bool, optional
Mark the interface read-only.
typestr: str, optional
The type string of the interface. By default this is "|u1", which
means "an unsigned integer with a not relevant byteorder". See:
<https://numpy.org/doc/stable/reference/arrays.interface.html>
version : bool, optional
The version of the interface.
Return
------
SimpleNamespace
An object that exposes `__cuda_array_interface__` and keeps a reference
to `owner`.
"""
if size < 0:
raise ValueError("size cannot be negative")
return SimpleNamespace(
__cuda_array_interface__={
"data": (ptr, readonly),
"shape": (size,),
"strides": None,
"typestr": typestr,
"version": version,
},
owner=owner,
)
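# A usage sketch (assumes `dev_buf` is an existing rmm.DeviceBuffer that
# outlives the wrapper):
#
#     wrapped = cuda_array_interface_wrapper(
#         ptr=dev_buf.ptr, size=dev_buf.size, owner=dev_buf
#     )
#
# `wrapped` can then be passed to anything expecting an object exposing
# `__cuda_array_interface__`, e.g. `Buffer._from_device_memory(wrapped)`.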
class Buffer(Serializable):
"""A Buffer represents device memory.
Use the factory function `as_buffer` to create a Buffer instance.
"""
_ptr: int
_size: int
_owner: object
def __init__(self):
raise ValueError(
f"do not create a {self.__class__} directly, please "
"use the factory function `cudf.core.buffer.as_buffer`"
)
@classmethod
def _from_device_memory(cls, data: Any) -> Self:
"""Create a Buffer from an object exposing `__cuda_array_interface__`.
No data is being copied.
Parameters
----------
data : device-buffer-like
An object implementing the CUDA Array Interface.
Returns
-------
Buffer
Buffer representing the same device memory as `data`
"""
# Bypass `__init__` and initialize attributes manually
ret = cls.__new__(cls)
ret._owner = data
if isinstance(data, rmm.DeviceBuffer): # Common case shortcut
ret._ptr = data.ptr
ret._size = data.size
else:
ret._ptr, ret._size = get_ptr_and_size(
data.__cuda_array_interface__
)
if ret.size < 0:
raise ValueError("size cannot be negative")
return ret
@classmethod
def _from_host_memory(cls, data: Any) -> Self:
"""Create a Buffer from a buffer or array like object
Data must implement `__array_interface__`, the buffer protocol, and/or
be convertible to a buffer object using `numpy.array()`
The host memory is copied to a new device allocation.
Raises ValueError if array isn't C-contiguous.
Parameters
----------
data : Any
An object that represents host memory.
Returns
-------
Buffer
Buffer representing a copy of `data`.
"""
# Convert to numpy array, this will not copy data in most cases.
ary = numpy.array(data, copy=False, subok=True)
# Extract pointer and size
ptr, size = get_ptr_and_size(ary.__array_interface__)
# Copy to device memory
buf = rmm.DeviceBuffer(ptr=ptr, size=size)
# Create from device memory
return cls._from_device_memory(buf)
def _getitem(self, offset: int, size: int) -> Self:
"""
Sub-classes can overwrite this to implement __getitem__
without having to handle non-slice inputs.
"""
return self._from_device_memory(
cuda_array_interface_wrapper(
ptr=self.get_ptr(mode="read") + offset,
size=size,
owner=self.owner,
)
)
def __getitem__(self, key: slice) -> Self:
"""Create a new slice of the buffer."""
if not isinstance(key, slice):
raise TypeError(
"Argument 'key' has incorrect type "
f"(expected slice, got {key.__class__.__name__})"
)
start, stop, step = key.indices(self.size)
if step != 1:
raise ValueError("slice must be C-contiguous")
return self._getitem(offset=start, size=stop - start)
def copy(self, deep: bool = True) -> Self:
"""
Return a copy of Buffer.
Parameters
----------
deep : bool, default True
If True, returns a deep copy of the underlying Buffer data.
If False, returns a shallow copy of the Buffer pointing to
the same underlying data.
Returns
-------
Buffer
"""
if deep:
return self._from_device_memory(
rmm.DeviceBuffer(ptr=self.get_ptr(mode="read"), size=self.size)
)
else:
return self[:]
@property
def size(self) -> int:
"""Size of the buffer in bytes."""
return self._size
@property
def nbytes(self) -> int:
"""Size of the buffer in bytes."""
return self._size
@property
def owner(self) -> Any:
"""Object owning the memory of the buffer."""
return self._owner
@property
def __cuda_array_interface__(self) -> Mapping:
"""Implementation of the CUDA Array Interface."""
return {
"data": (self.get_ptr(mode="write"), False),
"shape": (self.size,),
"strides": None,
"typestr": "|u1",
"version": 0,
}
def get_ptr(self, *, mode: Literal["read", "write"]) -> int:
"""Device pointer to the start of the buffer.
Parameters
----------
mode : str
Supported values are {"read", "write"}
If "write", the data pointed to may be modified
by the caller. If "read", the data pointed to
must not be modified by the caller.
Failure to fulfill this contract will cause
incorrect behavior.
Returns
-------
int
The device pointer as an integer
See Also
--------
SpillableBuffer.get_ptr
ExposureTrackedBuffer.get_ptr
"""
return self._ptr
def memoryview(
self, *, offset: int = 0, size: Optional[int] = None
) -> memoryview:
"""Read-only access to the buffer through host memory."""
size = self._size if size is None else size
host_buf = host_memory_allocation(size)
rmm._lib.device_buffer.copy_ptr_to_host(
self.get_ptr(mode="read") + offset, host_buf
)
return memoryview(host_buf).toreadonly()
def serialize(self) -> Tuple[dict, list]:
"""Serialize the buffer into header and frames.
The frames can be a mixture of memoryview and Buffer objects.
Returns
-------
Tuple[dict, List]
The first element of the returned tuple is a dict containing any
serializable metadata required to reconstruct the object. The
second element is a list containing Buffers and memoryviews.
"""
header: Dict[str, Any] = {}
header["type-serialized"] = pickle.dumps(type(self))
header["frame_count"] = 1
frames = [self]
return header, frames
@classmethod
def deserialize(cls, header: dict, frames: list) -> Self:
"""Create an Buffer from a serialized representation.
Parameters
----------
header : dict
The metadata required to reconstruct the object.
frames : list
The Buffer or memoryview that makes up the Buffer.
Returns
-------
Buffer
The deserialized Buffer.
"""
if header["frame_count"] != 1:
raise ValueError("Deserializing a Buffer expect a single frame")
frame = frames[0]
if isinstance(frame, cls):
return frame # The frame is already deserialized
if hasattr(frame, "__cuda_array_interface__"):
return cls._from_device_memory(frame)
return cls._from_host_memory(frame)
def __repr__(self) -> str:
klass = self.__class__
name = f"{klass.__module__}.{klass.__qualname__}"
return (
f"<{name} size={format_bytes(self._size)} "
f"ptr={hex(self._ptr)} owner={repr(self._owner)}>"
)
def is_c_contiguous(
shape: Sequence[int], strides: Sequence[int], itemsize: int
) -> bool:
"""Determine if shape and strides are C-contiguous
Parameters
----------
shape : Sequence[int]
Number of elements in each dimension.
strides : Sequence[int]
The stride of each dimension in bytes.
itemsize : int
Size of an element in bytes.
Return
------
bool
The boolean answer.
"""
if any(dim == 0 for dim in shape):
return True
cumulative_stride = itemsize
for dim, stride in zip(reversed(shape), reversed(strides)):
if dim > 1 and stride != cumulative_stride:
return False
cumulative_stride *= dim
return True
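# A worked example: for a C-contiguous array of shape (2, 3) with itemsize 8,
# the expected strides are (3 * 8, 8) == (24, 8); the cumulative stride starts
# at the itemsize and is multiplied by each dimension from the innermost out.
#
#     assert is_c_contiguous(shape=(2, 3), strides=(24, 8), itemsize=8)
#     assert not is_c_contiguous(shape=(2, 3), strides=(8, 16), itemsize=8)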
def get_ptr_and_size(array_interface: Mapping) -> Tuple[int, int]:
"""Retrieve the pointer and size from an array interface.
Raises ValueError if array isn't C-contiguous.
Parameters
----------
array_interface : Mapping
The array interface metadata.
Return
------
pointer : int
The pointer to device or host memory
size : int
The size in bytes
"""
shape = array_interface["shape"] or (1,)
strides = array_interface["strides"]
itemsize = cudf.dtype(array_interface["typestr"]).itemsize
if strides is None or is_c_contiguous(shape, strides, itemsize):
nelem = math.prod(shape)
ptr = array_interface["data"][0] or 0
return ptr, nelem * itemsize
raise ValueError("Buffer data must be C-contiguous")
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/buffer/exposure_tracked_buffer.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from __future__ import annotations
import weakref
from typing import (
Any,
Container,
Literal,
Mapping,
Optional,
Type,
TypeVar,
cast,
)
from typing_extensions import Self
import cudf
from cudf.core.buffer.buffer import Buffer, get_ptr_and_size
from cudf.utils.string import format_bytes
T = TypeVar("T", bound="ExposureTrackedBuffer")
def get_owner(data, klass: Type[T]) -> Optional[T]:
"""Get the owner of `data`, if any exist
Search through the stack of data owners in order to find an
owner of type `klass` (not subclasses).
Parameters
----------
data
The data object
Return
------
klass or None
The owner of `data` if it is of type `klass`, otherwise None.
"""
if type(data) is klass:
return data
if hasattr(data, "owner"):
return get_owner(data.owner, klass)
return None
def as_exposure_tracked_buffer(
data, exposed: bool, subclass: Optional[Type[T]] = None
) -> BufferSlice:
"""Factory function to wrap `data` in a slice of an exposure tracked buffer
If `subclass` is None, a new ExposureTrackedBuffer that points to the
memory of `data` is created and a BufferSlice that points to all of the
new ExposureTrackedBuffer is returned.
If `subclass` is not None, a new `subclass` is created instead. Still,
a BufferSlice that points to all of the new `subclass` is returned.
It is illegal for an exposure tracked buffer to own another exposure
tracked buffer. When representing the same memory, we should have a single
exposure tracked buffer and multiple buffer slices.
Developer Notes
---------------
This function always returns slices, thus all buffers in cudf will use
`BufferSlice` when copy-on-write is enabled. The slices implement
copy-on-write by triggering deep copies when write access is detected
and multiple slices point to the same exposure tracked buffer.
Parameters
----------
data : buffer-like or array-like
A buffer-like or array-like object that represents C-contiguous memory.
exposed
Mark the buffer as permanently exposed.
subclass
If not None, a subclass of ExposureTrackedBuffer to wrap `data`.
Return
------
BufferSlice
A buffer slice that points to a ExposureTrackedBuffer (or `subclass`),
which in turn wraps `data`.
"""
if not hasattr(data, "__cuda_array_interface__"):
if exposed:
raise ValueError("cannot created exposed host memory")
return cast(
BufferSlice, ExposureTrackedBuffer._from_host_memory(data)[:]
)
owner = get_owner(data, subclass or ExposureTrackedBuffer)
if owner is None:
return cast(
BufferSlice,
ExposureTrackedBuffer._from_device_memory(data, exposed=exposed)[
:
],
)
# At this point, we know that `data` is owned by an exposure tracked buffer
ptr, size = get_ptr_and_size(data.__cuda_array_interface__)
if size > 0 and owner._ptr == 0:
raise ValueError("Cannot create a non-empty slice of a null buffer")
return BufferSlice(base=owner, offset=ptr - owner._ptr, size=size)
class ExposureTrackedBuffer(Buffer):
"""A Buffer that tracks its "expose" status.
In order to implement copy-on-write and spillable buffers, we need the
ability to detect external access to the underlying memory. We say that
the buffer has been exposed if the device pointer (integer or void*) has
been accessed outside of ExposureTrackedBuffer. In this case, we have no
control over knowing if the data is being modified by a third-party.
Attributes
----------
_exposed
The current exposure status of the buffer. Notice, once the exposure
status becomes True, it should never change back.
_slices
The set of BufferSlice instances that point to this buffer.
"""
_exposed: bool
_slices: weakref.WeakSet[BufferSlice]
@property
def exposed(self) -> bool:
return self._exposed
def mark_exposed(self) -> None:
"""Mark the buffer as "exposed" permanently"""
self._exposed = True
@classmethod
def _from_device_memory(cls, data: Any, *, exposed: bool = False) -> Self:
"""Create an exposure tracked buffer from device memory.
No data is being copied.
Parameters
----------
data : device-buffer-like
An object implementing the CUDA Array Interface.
exposed : bool, optional
Mark the buffer as permanently exposed.
Returns
-------
ExposureTrackedBuffer
Buffer representing the same device memory as `data`
"""
ret = super()._from_device_memory(data)
ret._exposed = exposed
ret._slices = weakref.WeakSet()
return ret
def _getitem(self, offset: int, size: int) -> BufferSlice:
return BufferSlice(base=self, offset=offset, size=size)
@property
def __cuda_array_interface__(self) -> Mapping:
self.mark_exposed()
return super().__cuda_array_interface__
def __repr__(self) -> str:
return (
f"<ExposureTrackedBuffer exposed={self.exposed} "
f"size={format_bytes(self._size)} "
f"ptr={hex(self._ptr)} owner={repr(self._owner)}>"
)
class BufferSlice(ExposureTrackedBuffer):
"""A slice (aka. a view) of a exposure tracked buffer.
Parameters
----------
base
The exposure tracked buffer this slice refers to.
offset
The offset relative to the start memory of base (in bytes).
size
The size of the slice (in bytes)
passthrough_attributes
Name of attributes that are passed through to the base as-is.
"""
def __init__(
self,
base: ExposureTrackedBuffer,
offset: int,
size: int,
*,
passthrough_attributes: Container[str] = ("exposed",),
) -> None:
if size < 0:
raise ValueError("size cannot be negative")
if offset < 0:
raise ValueError("offset cannot be negative")
if offset + size > base.size:
raise ValueError(
"offset+size cannot be greater than the size of base"
)
self._base = base
self._offset = offset
self._size = size
self._owner = base
self._passthrough_attributes = passthrough_attributes
base._slices.add(self)
def __getattr__(self, name):
if name in self._passthrough_attributes:
return getattr(self._base, name)
raise AttributeError(
f"{self.__class__.__name__} object has no attribute {name}"
)
def _getitem(self, offset: int, size: int) -> BufferSlice:
return BufferSlice(
base=self._base, offset=offset + self._offset, size=size
)
def get_ptr(self, *, mode: Literal["read", "write"]) -> int:
if mode == "write" and cudf.get_option("copy_on_write"):
self.make_single_owner_inplace()
return self._base.get_ptr(mode=mode) + self._offset
def memoryview(
self, *, offset: int = 0, size: Optional[int] = None
) -> memoryview:
return self._base.memoryview(offset=self._offset + offset, size=size)
def copy(self, deep: bool = True) -> Self:
"""Return a copy of Buffer.
What actually happens when `deep == False` depends on the
"copy_on_write" option. When copy-on-write is enabled, a shallow copy
becomes a deep copy if the buffer has been exposed. This is because we
have no control over knowing if the data is being modified when the
buffer has been exposed to a third party.
Parameters
----------
deep : bool, default True
The semantics when copy-on-write is disabled:
- If deep=True, returns a deep copy of the underlying data.
- If deep=False, returns a shallow copy of the Buffer pointing
to the same underlying data.
The semantics when copy-on-write is enabled:
- From the user's perspective, always a deep copy of the
underlying data. However, the data isn't actually copied
until someone writes to the returned buffer.
Returns
-------
BufferSlice
A slice pointing to either a new or the existing base buffer
depending on the expose status of the base buffer and the
copy-on-write option (see above).
"""
if cudf.get_option("copy_on_write"):
base_copy = self._base.copy(deep=deep or self.exposed)
else:
base_copy = self._base.copy(deep=deep)
return cast(Self, base_copy[self._offset : self._offset + self._size])
@property
def __cuda_array_interface__(self) -> Mapping:
if cudf.get_option("copy_on_write"):
self.make_single_owner_inplace()
return super().__cuda_array_interface__
def make_single_owner_inplace(self) -> None:
"""Make sure this slice is the only one pointing to the base.
This is used by copy-on-write to trigger a deep copy when write
access is detected.
"""
if len(self._base._slices) > 1:
# If this is not the only slice pointing to `self._base`, we
# point to a new deep copy of the base.
t = self.copy(deep=True)
self._base = t._base
self._offset = t._offset
self._size = t._size
self._owner = t._base
self._base._slices.add(self)
def __repr__(self) -> str:
return (
f"<BufferSlice size={format_bytes(self._size)} "
f"offset={format_bytes(self._offset)} of {self._base}>"
)
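# A minimal sketch of the copy-on-write behaviour described above (assumes a
# GPU is available):
#
#     import cudf
#     cudf.set_option("copy_on_write", True)
#     s1 = cudf.Series([1, 2, 3])
#     s2 = s1.copy(deep=False)  # both series share the same base buffer
#     s2[0] = 10                # write access triggers a deep copy,
#     assert s1[0] == 1         # so `s1` is left untouched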
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/buffer/__init__.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from cudf.core.buffer.buffer import Buffer, cuda_array_interface_wrapper
from cudf.core.buffer.exposure_tracked_buffer import ExposureTrackedBuffer
from cudf.core.buffer.spillable_buffer import SpillableBuffer, SpillLock
from cudf.core.buffer.utils import (
acquire_spill_lock,
as_buffer,
get_spill_lock,
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/buffer/utils.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from __future__ import annotations
import threading
from contextlib import ContextDecorator
from typing import Any, Dict, Optional, Tuple, Union
from cudf.core.buffer.buffer import Buffer, cuda_array_interface_wrapper
from cudf.core.buffer.exposure_tracked_buffer import as_exposure_tracked_buffer
from cudf.core.buffer.spill_manager import get_global_manager
from cudf.core.buffer.spillable_buffer import SpillLock, as_spillable_buffer
from cudf.options import get_option
def as_buffer(
data: Union[int, Any],
*,
size: Optional[int] = None,
owner: Optional[object] = None,
exposed: bool = False,
) -> Buffer:
"""Factory function to wrap `data` in a Buffer object.
If `data` isn't a buffer already, a new buffer that points to the memory of
`data` is created. If `data` represents host memory, it is copied to a new
`rmm.DeviceBuffer` device allocation. Otherwise, the memory of `data` is
**not** copied, instead the new buffer keeps a reference to `data` in order
to retain its lifetime.
If `data` is an integer, it is assumed to point to device memory.
Raises ValueError if data isn't C-contiguous.
Parameters
----------
data : int or buffer-like or array-like
An integer representing a pointer to device memory or a buffer-like
or array-like object. When not an integer, `size` and `owner` must
be None.
size : int, optional
Size of device memory in bytes. Must be specified if `data` is an
integer.
owner : object, optional
Python object to which the lifetime of the memory allocation is tied.
A reference to this object is kept in the returned Buffer.
exposed : bool, optional
Mark the buffer as permanently exposed. This is used by
ExposureTrackedBuffer to determine when a deep copy is required and
by SpillableBuffer to mark the buffer unspillable.
Return
------
Buffer
A buffer instance that represents the device memory of `data`.
"""
if isinstance(data, Buffer):
return data
# We handle the integer argument in the factory function by wrapping
# the pointer in a `__cuda_array_interface__` exposing object so that
# the Buffer (and its sub-classes) do not have to.
if isinstance(data, int):
if size is None:
raise ValueError(
"size must be specified when `data` is an integer"
)
data = cuda_array_interface_wrapper(ptr=data, size=size, owner=owner)
elif size is not None or owner is not None:
raise ValueError(
"`size` and `owner` must be None when "
"`data` is a buffer-like or array-like object"
)
if get_option("copy_on_write"):
return as_exposure_tracked_buffer(data, exposed=exposed)
if get_global_manager() is not None:
return as_spillable_buffer(data, exposed=exposed)
if hasattr(data, "__cuda_array_interface__"):
return Buffer._from_device_memory(data)
return Buffer._from_host_memory(data)
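# Two typical calls to the factory (assumes a GPU and an existing
# rmm.DeviceBuffer `dev_buf`):
#
#     buf1 = as_buffer(dev_buf)
#     buf2 = as_buffer(dev_buf.ptr, size=dev_buf.size, owner=dev_buf)
#
# Neither call copies data; `buf2` keeps `dev_buf` alive through `owner`.
# Host memory (e.g. a numpy array) would instead be copied to a new device
# allocation.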
_thread_spill_locks: Dict[int, Tuple[Optional[SpillLock], int]] = {}
def _push_thread_spill_lock() -> None:
_id = threading.get_ident()
spill_lock, count = _thread_spill_locks.get(_id, (None, 0))
if spill_lock is None:
spill_lock = SpillLock()
_thread_spill_locks[_id] = (spill_lock, count + 1)
def _pop_thread_spill_lock() -> None:
_id = threading.get_ident()
spill_lock, count = _thread_spill_locks[_id]
if count == 1:
spill_lock = None
_thread_spill_locks[_id] = (spill_lock, count - 1)
class acquire_spill_lock(ContextDecorator):
"""Decorator and context to set spill lock automatically.
All calls to `get_spill_lock()` within the decorated function or context
will return a spill lock with a lifetime bound to the function or context.
Developer Notes
---------------
We use the global variable `_thread_spill_locks` to track the global spill
lock state. To support concurrency, each thread tracks its own state by
pushing and popping from `_thread_spill_locks` using its thread ID.
"""
def __enter__(self) -> Optional[SpillLock]:
_push_thread_spill_lock()
return get_spill_lock()
def __exit__(self, *exc):
_pop_thread_spill_lock()
def get_spill_lock() -> Union[SpillLock, None]:
"""Return a spill lock within the context of `acquire_spill_lock` or None
Returns None, if spilling is disabled.
"""
if get_global_manager() is None:
return None
_id = threading.get_ident()
spill_lock, _ = _thread_spill_locks.get(_id, (None, 0))
return spill_lock
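# `acquire_spill_lock` works both as a decorator and as a context manager.
# A sketch (assumes spilling is enabled):
#
#     @acquire_spill_lock()
#     def read_ptr(buf) -> int:
#         # get_ptr() picks up the thread's spill lock via get_spill_lock(),
#         # so `buf` is unspillable only for the duration of this call.
#         return buf.get_ptr(mode="read")
#
# If spilling is disabled, `get_spill_lock()` returns None and the decorator
# is effectively a no-op.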
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/buffer/spill_manager.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from __future__ import annotations
import gc
import io
import textwrap
import threading
import traceback
import warnings
import weakref
from collections import defaultdict
from dataclasses import dataclass
from functools import partial
from typing import Dict, List, Optional, Tuple
import rmm.mr
from cudf.core.buffer.spillable_buffer import SpillableBuffer
from cudf.options import get_option
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.string import format_bytes
_spill_cudf_nvtx_annotate = partial(
_cudf_nvtx_annotate, domain="cudf_python-spill"
)
def get_traceback() -> str:
"""Pretty print current traceback to a string"""
with io.StringIO() as f:
traceback.print_stack(file=f)
f.seek(0)
return f.read()
def get_rmm_memory_resource_stack(
mr: rmm.mr.DeviceMemoryResource,
) -> List[rmm.mr.DeviceMemoryResource]:
"""Get the RMM resource stack
Parameters
----------
mr : rmm.mr.DeviceMemoryResource
Top of the resource stack
Return
------
list
List of RMM resources
"""
if hasattr(mr, "upstream_mr"):
return [mr] + get_rmm_memory_resource_stack(mr.upstream_mr)
return [mr]
class SpillStatistics:
"""Gather spill statistics
Levels of information gathered:
0 - disabled (no overhead).
1+ - duration and number of bytes spilled (very low overhead).
2+ - a traceback for each time a spillable buffer is exposed
permanently (potential high overhead).
The statistics are printed when spilling-on-demand fails to find
any buffer to spill. It is possible to retrieve the statistics
manually through the spill manager, see example below.
Parameters
----------
level : int
If not 0, enables statistics at the specified level.
Examples
--------
>>> import cudf
>>> from cudf.core.buffer.spill_manager import get_global_manager
>>> manager = get_global_manager()
>>> manager.statistics
<SpillStatistics level=1>
>>> df = cudf.DataFrame({"a": [1,2,3]})
>>> manager.spill_to_device_limit(1) # Spill df
24
>>> print(get_global_manager().statistics)
Spill Statistics (level=1):
Spilling (level >= 1):
gpu => cpu: 24B in 0.0033579860000827466s
"""
@dataclass
class Expose:
traceback: str
count: int = 1
total_nbytes: int = 0
spilled_nbytes: int = 0
spill_totals: Dict[Tuple[str, str], Tuple[int, float]]
def __init__(self, level) -> None:
self.lock = threading.Lock()
self.level = level
self.spill_totals = defaultdict(lambda: (0, 0))
# Maps each traceback to a Expose
self.exposes: Dict[str, SpillStatistics.Expose] = {}
def log_spill(self, src: str, dst: str, nbytes: int, time: float) -> None:
"""Log a (un-)spilling event
Parameters
----------
src : str
The memory location before spilling.
dst : str
The memory location after spilling.
nbytes : int
Number of bytes (un-)spilled.
time : float
Elapsed time the event took in seconds.
"""
if self.level < 1:
return
with self.lock:
total_nbytes, total_time = self.spill_totals[(src, dst)]
self.spill_totals[(src, dst)] = (
total_nbytes + nbytes,
total_time + time,
)
def log_expose(self, buf: SpillableBuffer) -> None:
"""Log an expose event
We track logged exposes by grouping them by their traceback such
that `self.exposes` maps tracebacks (as strings) to their logged
data (as `Expose`).
Parameters
----------
buf : SpillableBuffer
The buffer being exposed.
"""
if self.level < 2:
return
with self.lock:
tb = get_traceback()
stat = self.exposes.get(tb, None)
spilled_nbytes = buf.nbytes if buf.is_spilled else 0
if stat is None:
self.exposes[tb] = self.Expose(
traceback=tb,
total_nbytes=buf.nbytes,
spilled_nbytes=spilled_nbytes,
)
else:
stat.count += 1
stat.total_nbytes += buf.nbytes
stat.spilled_nbytes += spilled_nbytes
def __repr__(self) -> str:
return f"<SpillStatistics level={self.level}>"
def __str__(self) -> str:
with self.lock:
ret = f"Spill Statistics (level={self.level}):\n"
if self.level == 0:
return ret[:-1] + " N/A"
# Print spilling stats
ret += " Spilling (level >= 1):"
if len(self.spill_totals) == 0:
ret += " None"
ret += "\n"
for (src, dst), (nbytes, time) in self.spill_totals.items():
ret += f" {src} => {dst}: "
ret += f"{format_bytes(nbytes)} in {time:.3f}s\n"
# Print expose stats
ret += " Exposed buffers (level >= 2): "
if self.level < 2:
return ret + "disabled"
if len(self.exposes) == 0:
ret += "None"
ret += "\n"
for s in sorted(self.exposes.values(), key=lambda x: -x.count):
ret += textwrap.indent(
(
f"exposed {s.count} times, "
f"total: {format_bytes(s.total_nbytes)}, "
f"spilled: {format_bytes(s.spilled_nbytes)}, "
f"traceback:\n{s.traceback}"
),
prefix=" " * 4,
)
return ret[:-1] # Remove last `\n`
class SpillManager:
"""Manager of spillable buffers.
This class implements tracking of all known spillable buffers, on-demand
spilling of said buffers, and (optionally) maintains a memory usage limit.
When `spill_on_demand=True`, the manager registers an RMM out-of-memory
error handler, which will spill spillable buffers in order to free up
memory.
When `device_memory_limit=<limit-in-bytes>`, the manager will try to keep
the device memory usage below the specified limit by continuously spilling
spillable buffers, which will introduce a modest overhead.
Notice, this is a soft limit. The memory usage might exceed the limit if
too many buffers are unspillable.
Parameters
----------
spill_on_demand : bool
Enable spill on demand.
device_memory_limit: int, optional
If not None, this is the device memory limit in bytes that triggers
device to host spilling. The global manager sets this to the value
of `CUDF_SPILL_DEVICE_LIMIT` or None.
statistic_level: int, optional
If not 0, enables statistics at the specified level. See
SpillStatistics for the different levels.
"""
_buffers: weakref.WeakValueDictionary[int, SpillableBuffer]
statistics: SpillStatistics
def __init__(
self,
*,
spill_on_demand: bool = False,
device_memory_limit: Optional[int] = None,
statistic_level: int = 0,
) -> None:
self._lock = threading.Lock()
self._buffers = weakref.WeakValueDictionary()
self._id_counter = 0
self._spill_on_demand = spill_on_demand
self._device_memory_limit = device_memory_limit
self.statistics = SpillStatistics(statistic_level)
if self._spill_on_demand:
# Set the RMM out-of-memory handler if not already set
mr = rmm.mr.get_current_device_resource()
if all(
not isinstance(m, rmm.mr.FailureCallbackResourceAdaptor)
for m in get_rmm_memory_resource_stack(mr)
):
rmm.mr.set_current_device_resource(
rmm.mr.FailureCallbackResourceAdaptor(
mr, self._out_of_memory_handle
)
)
def _out_of_memory_handle(self, nbytes: int, *, retry_once=True) -> bool:
"""Try to handle an out-of-memory error by spilling
This can be used as the callback function to RMM's
`FailureCallbackResourceAdaptor`
Parameters
----------
nbytes : int
Number of bytes to try to spill.
retry_once : bool, optional
If True, call `gc.collect()` and retry once.
Return
------
bool
True if any buffers were freed otherwise False.
Warning
-------
In order to avoid deadlock, this function should not lock
already locked buffers.
"""
# Let's try to spill device memory
spilled = self.spill_device_memory(nbytes=nbytes)
if spilled > 0:
return True # Ask RMM to retry the allocation
if retry_once:
# Let's collect garbage and try one more time
gc.collect()
return self._out_of_memory_handle(nbytes, retry_once=False)
# TODO: write to log instead of stdout
print(
f"[WARNING] RMM allocation of {format_bytes(nbytes)} bytes "
"failed, spill-on-demand couldn't find any device memory to "
f"spill:\n{repr(self)}\ntraceback:\n{get_traceback()}\n"
f"{self.statistics}"
)
return False # Since we didn't find anything to spill, we give up
def add(self, buffer: SpillableBuffer) -> None:
"""Add buffer to the set of managed buffers
The manager keeps a weak reference to the buffer
Parameters
----------
buffer : SpillableBuffer
The buffer to manage
"""
if buffer.size > 0 and not buffer.exposed:
with self._lock:
self._buffers[self._id_counter] = buffer
self._id_counter += 1
self.spill_to_device_limit()
def buffers(
self, order_by_access_time: bool = False
) -> Tuple[SpillableBuffer, ...]:
"""Get all managed buffers
Parameters
----------
order_by_access_time : bool, optional
Order the buffers by access time (ascending order)
Return
------
tuple
Tuple of buffers
"""
with self._lock:
ret = tuple(self._buffers.values())
if order_by_access_time:
ret = tuple(sorted(ret, key=lambda b: b.last_accessed))
return ret
@_spill_cudf_nvtx_annotate
def spill_device_memory(self, nbytes: int) -> int:
"""Try to spill device memory
This function is safe to call during spill-on-demand
since it does not lock buffers already locked.
Parameters
----------
nbytes : int
Number of bytes to try to spill
Return
------
int
Number of bytes actually spilled.
"""
spilled = 0
for buf in self.buffers(order_by_access_time=True):
if buf.lock.acquire(blocking=False):
try:
if not buf.is_spilled and buf.spillable:
buf.spill(target="cpu")
spilled += buf.size
if spilled >= nbytes:
break
finally:
buf.lock.release()
return spilled
def spill_to_device_limit(self, device_limit: Optional[int] = None) -> int:
"""Try to spill device memory until device limit
Notice, by default this is a no-op.
Parameters
----------
device_limit : int, optional
Limit in bytes. If None, the value of the environment variable
`CUDF_SPILL_DEVICE_LIMIT` is used. If this is not set, the method
does nothing and returns 0.
Return
------
int
The number of bytes spilled.
"""
limit = (
self._device_memory_limit if device_limit is None else device_limit
)
if limit is None:
return 0
unspilled = sum(
buf.size for buf in self.buffers() if not buf.is_spilled
)
return self.spill_device_memory(nbytes=unspilled - limit)
def __repr__(self) -> str:
spilled = sum(buf.size for buf in self.buffers() if buf.is_spilled)
unspilled = sum(
buf.size for buf in self.buffers() if not buf.is_spilled
)
unspillable = 0
for buf in self.buffers():
if not (buf.is_spilled or buf.spillable):
unspillable += buf.size
unspillable_ratio = unspillable / unspilled if unspilled else 0
dev_limit = "N/A"
if self._device_memory_limit is not None:
dev_limit = format_bytes(self._device_memory_limit)
return (
f"<SpillManager spill_on_demand={self._spill_on_demand} "
f"device_memory_limit={dev_limit} | "
f"{format_bytes(spilled)} spilled | "
f"{format_bytes(unspilled)} ({unspillable_ratio:.0%}) "
f"unspilled (unspillable)>"
)
# The global manager has three states:
# - Uninitialized
# - Initialized to None (spilling disabled)
# - Initialized to a SpillManager instance (spilling enabled)
_global_manager_uninitialized: bool = True
_global_manager: Optional[SpillManager] = None
def set_global_manager(manager: Optional[SpillManager]) -> None:
"""Set the global manager, which if None disables spilling"""
global _global_manager, _global_manager_uninitialized
if _global_manager is not None:
gc.collect()
buffers = _global_manager.buffers()
if len(buffers) > 0:
warnings.warn(f"overwriting non-empty manager: {buffers}")
_global_manager = manager
_global_manager_uninitialized = False
def get_global_manager() -> Optional[SpillManager]:
"""Get the global manager or None if spilling is disabled"""
global _global_manager_uninitialized
if _global_manager_uninitialized:
manager = None
if get_option("spill"):
manager = SpillManager(
spill_on_demand=get_option("spill_on_demand"),
device_memory_limit=get_option("spill_device_limit"),
statistic_level=get_option("spill_stats"),
)
set_global_manager(manager)
return _global_manager
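# A sketch of enabling spilling and inspecting the manager (the options must
# be set before the first buffer is created, since the global manager is
# initialized lazily and cached):
#
#     import cudf
#     cudf.set_option("spill", True)
#     cudf.set_option("spill_stats", 1)
#     from cudf.core.buffer.spill_manager import get_global_manager
#     manager = get_global_manager()  # SpillManager instance, or None
#     print(manager)                  # spilled/unspilled byte counts
#     print(manager.statistics)       # per-direction spill totals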
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/tools/datetimes.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
import math
import re
import warnings
from typing import Sequence, Union
import cupy as cp
import numpy as np
import pandas as pd
import pandas.tseries.offsets as pd_offset
from pandas.core.tools.datetimes import _unit_map
from typing_extensions import Self
import cudf
from cudf import _lib as libcudf
from cudf._lib.strings.convert.convert_integers import (
is_integer as cpp_is_integer,
)
from cudf.api.types import is_integer, is_scalar
from cudf.core import column
from cudf.core.index import as_index
_unit_dtype_map = {
"ns": "datetime64[ns]",
"us": "datetime64[us]",
"ms": "datetime64[ms]",
"m": "datetime64[s]",
"h": "datetime64[s]",
"s": "datetime64[s]",
"D": "datetime64[s]",
}
_offset_alias_to_code = {
"W": "W",
"D": "D",
"H": "h",
"h": "h",
"T": "m",
"min": "m",
"s": "s",
"S": "s",
"U": "us",
"us": "us",
"N": "ns",
"ns": "ns",
}
def to_datetime(
arg,
errors="raise",
dayfirst=False,
yearfirst=False,
utc=None,
format=None,
exact=True,
unit="ns",
infer_datetime_format=False,
origin="unix",
cache=True,
):
"""
Convert argument to datetime.
Parameters
----------
arg : int, float, str, datetime, list, tuple, 1-d array,
Series, DataFrame/dict-like
The object to convert to a datetime.
errors : {'ignore', 'raise', 'coerce', 'warn'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaT.
- If 'warn' : prints last exceptions as warnings and
returns the input.
- If 'ignore', then invalid parsing will return the input.
dayfirst : bool, default False
Specify a date parse order if `arg` is a str or a list-like of strs.
If True, parses dates with the day first, eg 10/11/12 is parsed as
2012-11-10.
Warning: dayfirst=True is not strict, but will prefer to parse
with day first (this is a known bug, based on dateutil behavior).
format : str, default None
The strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
See strftime documentation for more information on choices:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior.
unit : str, default 'ns'
The unit of the arg (D, s, ms, us, ns) denotes the unit, which is an
integer or float number. This will be based off the
origin (unix epoch start).
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : bool, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
Returns
-------
datetime
If parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns'] or plurals of the same.
>>> import cudf
>>> df = cudf.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> cudf.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
>>> cudf.to_datetime(1490195805, unit='s')
numpy.datetime64('2017-03-22T15:16:45.000000000')
>>> cudf.to_datetime(1490195805433502912, unit='ns')
numpy.datetime64('1780-11-20T01:02:30.494253056')
"""
if errors not in {"ignore", "raise", "coerce", "warn"}:
raise ValueError(
f"errors parameter has to be either one of: "
f"{['ignore', 'raise', 'coerce', 'warn']}, found: "
f"{errors}"
)
elif errors in {"ignore", "coerce"} and not is_scalar(arg):
raise NotImplementedError(
f"{errors=} is not implemented when arg is not scalar-like"
)
if arg is None:
return None
if exact is False:
raise NotImplementedError("exact support is not yet implemented")
if origin != "unix":
raise NotImplementedError("origin support is not yet implemented")
if yearfirst:
raise NotImplementedError("yearfirst support is not yet implemented")
if utc:
raise NotImplementedError("utc is not yet implemented")
if format is not None:
if "%Z" in format or "%z" in format:
raise NotImplementedError(
"cuDF does not yet support timezone-aware datetimes"
)
elif "%f" in format:
format = format.replace("%f", "%9f")
try:
if isinstance(arg, cudf.DataFrame):
# we require at least Ymd
required = ["year", "month", "day"]
req = list(set(required) - set(arg._data.names))
if len(req):
req = ",".join(req)
raise ValueError(
f"to assemble mappings requires at least that "
f"[year, month, day] be specified: [{req}] "
f"is missing"
)
# replace passed column name with values in _unit_map
unit = {k: get_units(k) for k in arg._data.names}
unit_rev = {v: k for k, v in unit.items()}
# keys we don't recognize
excess = set(unit_rev.keys()) - set(_unit_map.values())
if len(excess):
excess = ",".join(excess)
raise ValueError(
f"extra keys have been passed to the "
f"datetime assemblage: [{excess}]"
)
new_series = (
arg[unit_rev["year"]].astype("str")
+ "-"
+ arg[unit_rev["month"]].astype("str").str.zfill(2)
+ "-"
+ arg[unit_rev["day"]].astype("str").str.zfill(2)
)
format = "%Y-%m-%d"
col = new_series._column.as_datetime_column(
"datetime64[s]", format=format
)
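            # The assembled dates start out at second resolution; the loop
            # below upgrades them to nanosecond resolution if any time-of-day
            # component is a float column or a string column containing
            # non-integer values.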
for u in ["h", "m", "s", "ms", "us", "ns"]:
value = unit_rev.get(u)
if value is not None and value in arg:
arg_col = arg._data[value]
if arg_col.dtype.kind in ("f"):
col = new_series._column.as_datetime_column(
"datetime64[ns]", format=format
)
break
elif arg_col.dtype.kind in ("O"):
if not cpp_is_integer(arg_col).all():
col = new_series._column.as_datetime_column(
"datetime64[ns]", format=format
)
break
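            # Accumulate the remaining time-of-day components: each one is
            # scaled from its own unit to the resolution of `col` (seconds or
            # nanoseconds) and summed, then added onto the integer view of
            # the assembled dates.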
times_column = None
for u in ["h", "m", "s", "ms", "us", "ns"]:
value = unit_rev.get(u)
if value is not None and value in arg:
current_col = arg._data[value]
# If the arg[value] is of int or
# float dtype we don't want to type-cast
if current_col.dtype.kind in ("O"):
try:
current_col = current_col.astype(dtype="int64")
except ValueError:
current_col = current_col.astype(dtype="float64")
factor = cudf.Scalar(
column.datetime._unit_to_nanoseconds_conversion[u]
/ (
column.datetime._unit_to_nanoseconds_conversion[
"s"
]
if np.datetime_data(col.dtype)[0] == "s"
else 1
)
)
if times_column is None:
times_column = current_col * factor
else:
times_column = times_column + (current_col * factor)
if times_column is not None:
col = (col.astype(dtype="int64") + times_column).astype(
dtype=col.dtype
)
return cudf.Series(col, index=arg.index)
elif isinstance(arg, cudf.BaseIndex):
col = arg._values
col = _process_col(
col=col,
unit=unit,
dayfirst=dayfirst,
infer_datetime_format=infer_datetime_format,
format=format,
)
return as_index(col, name=arg.name)
elif isinstance(arg, (cudf.Series, pd.Series)):
col = column.as_column(arg)
col = _process_col(
col=col,
unit=unit,
dayfirst=dayfirst,
infer_datetime_format=infer_datetime_format,
format=format,
)
return cudf.Series(col, index=arg.index, name=arg.name)
else:
col = column.as_column(arg)
col = _process_col(
col=col,
unit=unit,
dayfirst=dayfirst,
infer_datetime_format=infer_datetime_format,
format=format,
)
if is_scalar(arg):
return col.element_indexing(0)
else:
return as_index(col)
except Exception as e:
if errors == "raise":
raise e
elif errors == "warn":
import traceback
tb = traceback.format_exc()
warnings.warn(tb)
elif errors == "ignore":
pass
elif errors == "coerce":
return np.datetime64("nat", "ns" if unit is None else unit)
return arg
def _process_col(col, unit, dayfirst, infer_datetime_format, format):
if col.dtype.kind == "M":
return col
elif col.dtype.kind in ("f"):
if unit not in (None, "ns"):
factor = cudf.Scalar(
column.datetime._unit_to_nanoseconds_conversion[unit]
)
col = col * factor
if format is not None:
# Converting to int because,
# pandas actually creates a datetime column
# out of float values and then creates an
# int column out of it to parse against `format`.
# Instead we directly cast to int and perform
# parsing against `format`.
col = (
col.astype("int")
.astype("str")
.as_datetime_column(
dtype="datetime64[us]"
if "%f" in format
else "datetime64[s]",
format=format,
)
)
else:
col = col.as_datetime_column(dtype="datetime64[ns]")
return col
elif col.dtype.kind in ("i"):
if unit in ("D", "h", "m"):
factor = cudf.Scalar(
column.datetime._unit_to_nanoseconds_conversion[unit]
/ column.datetime._unit_to_nanoseconds_conversion["s"]
)
col = col * factor
if format is not None:
col = col.astype("str").as_datetime_column(
dtype=_unit_dtype_map[unit], format=format
)
else:
col = col.as_datetime_column(dtype=_unit_dtype_map[unit])
return col
elif col.dtype.kind in ("O"):
if unit not in (None, "ns") or col.null_count == len(col):
try:
col = col.astype(dtype="int64")
except ValueError:
col = col.astype(dtype="float64")
return _process_col(
col=col,
unit=unit,
dayfirst=dayfirst,
infer_datetime_format=infer_datetime_format,
format=format,
)
else:
if format is None:
if not infer_datetime_format and dayfirst:
raise NotImplementedError(
f"{dayfirst=} not implemented "
f"when {format=} and {infer_datetime_format=}."
)
format = column.datetime.infer_format(
element=col.element_indexing(0),
dayfirst=dayfirst,
)
return col.as_datetime_column(
dtype=_unit_dtype_map[unit],
format=format,
)
raise TypeError(
f"dtype {col.dtype} cannot be converted to {_unit_dtype_map[unit]}"
)
def get_units(value):
if value in _unit_map:
return _unit_map[value]
# m is case significant
if value.lower() in _unit_map:
return _unit_map[value.lower()]
return value
class DateOffset:
"""
An object used for binary ops where calendrical arithmetic
is desired rather than absolute time arithmetic. Used to
add or subtract a whole number of periods, such as several
months or years, to a series or index of datetime dtype.
Works similarly to pd.DateOffset, but stores the offset
on the device (GPU).
Parameters
----------
n : int, default 1
The number of time periods the offset represents.
**kwds
        Temporal parameters that add to or replace the offset value.
Parameters that **add** to the offset (like Timedelta):
- months
See Also
--------
pandas.DateOffset : The equivalent Pandas object that this
object replicates
Examples
--------
>>> from cudf import DateOffset
>>> ts = cudf.Series([
... "2000-01-01 00:00:00.012345678",
... "2000-01-31 00:00:00.012345678",
... "2000-02-29 00:00:00.012345678",
... ], dtype='datetime64[ns]')
>>> ts + DateOffset(months=3)
0 2000-04-01 00:00:00.012345678
1 2000-04-30 00:00:00.012345678
2 2000-05-29 00:00:00.012345678
dtype: datetime64[ns]
>>> ts - DateOffset(months=12)
0 1999-01-01 00:00:00.012345678
1 1999-01-31 00:00:00.012345678
2 1999-02-28 00:00:00.012345678
dtype: datetime64[ns]
Notes
-----
Note that cuDF does not yet support DateOffset arguments
that 'replace' units in the datetime data being operated on
such as
- year
- month
- week
- day
- hour
- minute
- second
- microsecond
- millisecond
- nanosecond
cuDF does not yet support rounding via a `normalize`
keyword argument.
"""
_UNITS_TO_CODES = {
"nanoseconds": "ns",
"microseconds": "us",
"milliseconds": "ms",
"seconds": "s",
"minutes": "m",
"hours": "h",
"days": "D",
"weeks": "W",
"months": "M",
"years": "Y",
}
_CODES_TO_UNITS = {
"ns": "nanoseconds",
"us": "microseconds",
"ms": "milliseconds",
"L": "milliseconds",
"s": "seconds",
"m": "minutes",
"h": "hours",
"D": "days",
"W": "weeks",
"M": "months",
"Y": "years",
}
_TICK_OR_WEEK_TO_UNITS = {
pd_offset.Week: "weeks",
pd_offset.Day: "days",
pd_offset.Hour: "hours",
pd_offset.Minute: "minutes",
pd_offset.Second: "seconds",
pd_offset.Milli: "milliseconds",
pd_offset.Micro: "microseconds",
pd_offset.Nano: "nanoseconds",
}
_FREQSTR_REGEX = re.compile("([0-9]*)([a-zA-Z]+)")
def __init__(self, n=1, normalize=False, **kwds):
if normalize:
raise NotImplementedError(
"normalize not yet supported for DateOffset"
)
all_possible_units = {
"years",
"months",
"weeks",
"days",
"hours",
"minutes",
"seconds",
"milliseconds",
"microseconds",
"nanoseconds",
"year",
"month",
"week",
"day",
"hour",
"minute",
"second",
"microsecond",
"millisecond",
"nanosecond",
}
supported_units = {
"years",
"months",
"weeks",
"days",
"hours",
"minutes",
"seconds",
"milliseconds",
"microseconds",
"nanoseconds",
}
unsupported_units = all_possible_units - supported_units
invalid_kwds = set(kwds) - supported_units - unsupported_units
if invalid_kwds:
raise TypeError(
f"Keyword arguments '{','.join(list(invalid_kwds))}'"
" are not recognized"
)
unsupported_kwds = set(kwds) & unsupported_units
if unsupported_kwds:
raise NotImplementedError(
f"Keyword arguments '{','.join(list(unsupported_kwds))}'"
" are not yet supported."
)
if any(not is_integer(val) for val in kwds.values()):
raise ValueError("Non-integer periods not supported")
self._kwds = kwds
kwds = self._combine_months_and_years(**kwds)
kwds = self._combine_kwargs_to_seconds(**kwds)
scalars = {}
for k, v in kwds.items():
if k in all_possible_units:
# Months must be int16
if k == "months":
# TODO: throw for out-of-bounds int16 values
dtype = "int16"
else:
unit = self._UNITS_TO_CODES[k]
dtype = cudf.dtype(f"timedelta64[{unit}]")
scalars[k] = cudf.Scalar(v, dtype=dtype)
self._scalars = scalars
@property
def kwds(self):
return self._kwds
def _combine_months_and_years(self, **kwargs):
# TODO: if months is zero, don't do a binop
kwargs["months"] = kwargs.pop("years", 0) * 12 + kwargs.pop(
"months", 0
)
return kwargs
def _combine_kwargs_to_seconds(self, **kwargs):
"""
Combine days, weeks, hours and minutes to a single
scalar representing the total seconds
"""
seconds = 0
seconds += kwargs.pop("weeks", 0) * 604800
seconds += kwargs.pop("days", 0) * 86400
seconds += kwargs.pop("hours", 0) * 3600
seconds += kwargs.pop("minutes", 0) * 60
seconds += kwargs.pop("seconds", 0)
if seconds > np.iinfo("int64").max:
raise NotImplementedError(
"Total days + weeks + hours + minutes + seconds can not exceed"
f" {np.iinfo('int64').max} seconds"
)
if seconds != 0:
kwargs["seconds"] = seconds
return kwargs
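    # Illustration of the two helpers above (example values chosen
    # arbitrarily): DateOffset(years=1, months=2, days=3) stores kwds as
    # passed, then folds them into {"months": 14, "seconds": 259200} before
    # the per-unit scalars are built.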
def _datetime_binop(
self, datetime_col, op, reflect=False
) -> column.DatetimeColumn:
if reflect and op == "__sub__":
raise TypeError(
f"Can not subtract a {type(datetime_col).__name__}"
f" from a {type(self).__name__}"
)
if op not in {"__add__", "__sub__"}:
raise TypeError(
f"{op} not supported between {type(self).__name__}"
f" and {type(datetime_col).__name__}"
)
if not self._is_no_op:
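            # Calendrical months are applied through libcudf's add_months;
            # every other stored unit is a fixed-duration timedelta scalar
            # that is simply added to (or subtracted from) the column.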
if "months" in self._scalars:
rhs = self._generate_months_column(len(datetime_col), op)
datetime_col = libcudf.datetime.add_months(datetime_col, rhs)
for unit, value in self._scalars.items():
if unit != "months":
value = -value if op == "__sub__" else value
datetime_col += cudf.core.column.as_column(
value, length=len(datetime_col)
)
return datetime_col
def _generate_months_column(self, size, op):
months = self._scalars["months"]
months = -months if op == "__sub__" else months
# TODO: pass a scalar instead of constructing a column
# https://github.com/rapidsai/cudf/issues/6990
col = cudf.core.column.as_column(months, length=size)
return col
@property
def _is_no_op(self) -> bool:
# some logic could be implemented here for more complex cases
# such as +1 year, -12 months
return all(i == 0 for i in self._kwds.values())
def __neg__(self):
new_scalars = {k: -v for k, v in self._kwds.items()}
return DateOffset(**new_scalars)
def __repr__(self):
includes = []
for unit in sorted(self._UNITS_TO_CODES):
val = self._kwds.get(unit, None)
if val is not None:
includes.append(f"{unit}={val}")
unit_data = ", ".join(includes)
repr_str = f"<{self.__class__.__name__}: {unit_data}>"
return repr_str
@classmethod
def _from_freqstr(cls, freqstr: str) -> Self:
"""
        Parse a string and return a DateOffset object.
        Expects strings of the form 3D, 25W, 10ms, 42ns, etc.
"""
match = cls._FREQSTR_REGEX.match(freqstr)
if match is None:
raise ValueError(f"Invalid frequency string: {freqstr}")
numeric_part = match.group(1)
if numeric_part == "":
numeric_part = "1"
freq_part = match.group(2)
if freq_part not in cls._CODES_TO_UNITS:
raise ValueError(f"Cannot interpret frequency str: {freqstr}")
return cls(**{cls._CODES_TO_UNITS[freq_part]: int(numeric_part)})
@classmethod
def _from_pandas_ticks_or_weeks(
cls,
tick: Union[pd.tseries.offsets.Tick, pd.tseries.offsets.Week],
) -> Self:
return cls(**{cls._TICK_OR_WEEK_TO_UNITS[type(tick)]: tick.n})
def _maybe_as_fast_pandas_offset(self):
if (
len(self.kwds) == 1
and _has_fixed_frequency(self)
and not _has_non_fixed_frequency(self)
):
# Pandas computation between `n*offsets.Minute()` is faster than
# `n*DateOffset`. If only single offset unit is in use, we return
# the base offset for faster binary ops.
return pd.tseries.frequencies.to_offset(pd.Timedelta(**self.kwds))
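        # Otherwise (multiple units, or a non-fixed unit such as months)
        # fall back to a plain pd.DateOffset built from the stored kwds.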
return pd.DateOffset(**self.kwds, n=1)
def _isin_datetimelike(
lhs: Union[column.TimeDeltaColumn, column.DatetimeColumn], values: Sequence
) -> column.ColumnBase:
"""
Check whether values are contained in the
DateTimeColumn or TimeDeltaColumn.
Parameters
----------
lhs : TimeDeltaColumn or DatetimeColumn
Column to check whether the `values` exist in.
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a TypeError. Instead, turn a single string into a list
of one element.
Returns
-------
result: Column
Column of booleans indicating if each element is in values.
"""
rhs = None
try:
rhs = cudf.core.column.as_column(values)
if rhs.dtype.kind in {"f", "i", "u"}:
return cudf.core.column.full(len(lhs), False, dtype="bool")
rhs = rhs.astype(lhs.dtype)
res = lhs._isin_earlystop(rhs)
if res is not None:
return res
except ValueError:
# pandas functionally returns all False when cleansing via
# typecasting fails
return cudf.core.column.full(len(lhs), False, dtype="bool")
res = lhs._obtain_isin_result(rhs)
return res
def date_range(
start=None,
end=None,
periods=None,
freq=None,
tz=None,
normalize=False,
name=None,
closed=None,
):
"""Return a fixed frequency DatetimeIndex.
Returns the range of equally spaced time points (where the difference
between any two adjacent points is specified by the given frequency)
such that they all satisfy `start` <[=] x <[=] `end`, where the first one
and the last one are, resp., the first and last time points in that range
that are valid for `freq`.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or DateOffset
Frequencies to generate the datetime series. Mixed fixed-frequency and
non-fixed frequency offset is unsupported. See notes for detail.
Supported offset alias: ``D``, ``h``, ``H``, ``T``, ``min``, ``S``,
``U``, ``us``, ``N``, ``ns``.
tz : str or tzinfo, optional
Not Supported
normalize : bool, default False
Not Supported
name : str, default None
Name of the resulting DatetimeIndex
closed : {None, 'left', 'right'}, optional
Not Supported
Returns
-------
DatetimeIndex
Notes
-----
Of the four parameters `start`, `end`, `periods`, and `freq`, exactly three
must be specified. If `freq` is omitted, the resulting DatetimeIndex will
have periods linearly spaced elements between start and end (closed on both
sides).
cudf supports `freq` specified with either fixed-frequency offset
(such as weeks, days, hours, minutes...) or non-fixed frequency offset
(such as years and months). Specifying `freq` with a mixed fixed and
non-fixed frequency is currently unsupported. For example:
>>> cudf.date_range(
... start='2021-08-23 08:00:00',
... freq=cudf.DateOffset(months=2, days=5),
... periods=5)
...
NotImplementedError: Mixing fixed and non-fixed frequency offset is
unsupported.
Examples
--------
>>> cudf.date_range(
... start='2021-08-23 08:00:00',
... freq=cudf.DateOffset(years=1, months=2),
... periods=5)
DatetimeIndex(['2021-08-23 08:00:00', '2022-10-23 08:00:00',
'2023-12-23 08:00:00', '2025-02-23 08:00:00',
'2026-04-23 08:00:00'],
dtype='datetime64[ns]')
"""
if tz is not None:
raise NotImplementedError("tz is currently unsupported.")
if closed is not None:
raise NotImplementedError("closed is currently unsupported.")
if (start, end, periods, freq).count(None) > 1:
raise ValueError(
"Of the four parameters: start, end, periods, and freq, exactly "
"three must be specified"
)
dtype = np.dtype("<M8[ns]")
if freq is None:
# `start`, `end`, `periods` is specified, we treat the timestamps as
# integers and divide the number range evenly with `periods` elements.
start = cudf.Scalar(start, dtype=dtype).value.astype("int64")
end = cudf.Scalar(end, dtype=dtype).value.astype("int64")
arr = cp.linspace(start=start, stop=end, num=periods)
result = cudf.core.column.as_column(arr).astype("datetime64[ns]")
return cudf.DatetimeIndex._from_data({name: result})
elif cudf.get_option("mode.pandas_compatible"):
raise NotImplementedError(
"`DatetimeIndex` with `freq` cannot be constructed."
)
# The code logic below assumes `freq` is defined. It is first normalized
# into `DateOffset` for further computation with timestamps.
if isinstance(freq, DateOffset):
offset = freq
elif isinstance(freq, str):
offset = pd.tseries.frequencies.to_offset(freq)
if not isinstance(offset, pd.tseries.offsets.Tick) and not isinstance(
offset, pd.tseries.offsets.Week
):
raise ValueError(
f"Unrecognized frequency string {freq}. cuDF does "
"not yet support month, quarter, year-anchored frequency."
)
offset = DateOffset._from_pandas_ticks_or_weeks(offset)
else:
raise TypeError("`freq` must be a `str` or cudf.DateOffset object.")
if _has_mixed_freqeuency(offset):
raise NotImplementedError(
"Mixing fixed and non-fixed frequency offset is unsupported."
)
# Depending on different combinations of `start`, `end`, `offset`,
# `periods`, the following logic makes sure before computing the sequence,
# `start`, `periods`, `offset` is defined
_periods_not_specified = False
if start is None:
end = cudf.Scalar(end, dtype=dtype)
start = cudf.Scalar(
pd.Timestamp(end.value)
- (periods - 1) * offset._maybe_as_fast_pandas_offset(),
dtype=dtype,
)
elif end is None:
start = cudf.Scalar(start, dtype=dtype)
elif periods is None:
        # When `periods` is unspecified, its upper bound is estimated by
        # dividing the number of nanoseconds between the two timestamps by
        # the lower bound of `freq` in nanoseconds. While the final result
        # may contain extra elements that exceed `end`, they are trimmed
        # as a post-processing step. [1]
_periods_not_specified = True
start = cudf.Scalar(start, dtype=dtype)
end = cudf.Scalar(end, dtype=dtype)
_is_increment_sequence = end >= start
periods = math.ceil(
int(end - start) / _offset_to_nanoseconds_lower_bound(offset)
)
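        # For illustration: with `start` and `end` 60 days apart and
        # freq=DateOffset(months=1), the lower bound of one month is 28 days,
        # so the estimate is ceil(60 / 28) == 3 candidate timestamps.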
if periods < 0:
# Mismatched sign between (end-start) and offset, return empty
# column
periods = 0
elif periods == 0:
# end == start, return exactly 1 timestamp (start)
periods = 1
# We compute `end_estim` (the estimated upper bound of the date
# range) below, but don't always use it. We do this to ensure
# that the appropriate OverflowError is raised by Pandas in case
# of overflow.
# FIXME: when `end_estim` is out of bound, but the actual `end` is not,
# we shouldn't raise but compute the sequence as is. The trailing overflow
# part should get trimmed at the end.
end_estim = (
pd.Timestamp(start.value)
+ periods * offset._maybe_as_fast_pandas_offset()
).to_datetime64()
if "months" in offset.kwds or "years" in offset.kwds:
# If `offset` is non-fixed frequency, resort to libcudf.
res = libcudf.datetime.date_range(start.device_value, periods, offset)
if _periods_not_specified:
# As mentioned in [1], this is a post processing step to trim extra
# elements when `periods` is an estimated value. Only offset
# specified with non fixed frequencies requires trimming.
res = res.apply_boolean_mask(
(res <= end) if _is_increment_sequence else (res <= start)
)
else:
        # If `offset` is a fixed frequency, we generate the range by
        # treating `start`, `stop` and `step` as ints:
stop = end_estim.astype("int64")
start = start.value.astype("int64")
step = _offset_to_nanoseconds_lower_bound(offset)
arr = cp.arange(start=start, stop=stop, step=step, dtype="int64")
res = cudf.core.column.as_column(arr).astype("datetime64[ns]")
return cudf.DatetimeIndex._from_data({name: res})
def _has_fixed_frequency(freq: DateOffset) -> bool:
"""Utility to determine if `freq` contains fixed frequency offset"""
fixed_frequencies = {
"weeks",
"days",
"hours",
"minutes",
"seconds",
"milliseconds",
"microseconds",
"nanoseconds",
}
return len(freq.kwds.keys() & fixed_frequencies) > 0
def _has_non_fixed_frequency(freq: DateOffset) -> bool:
"""Utility to determine if `freq` contains non-fixed frequency offset"""
non_fixed_frequencies = {"years", "months"}
return len(freq.kwds.keys() & non_fixed_frequencies) > 0
def _has_mixed_freqeuency(freq: DateOffset) -> bool:
"""Utility to determine if `freq` contains mixed fixed and non-fixed
frequency offset. e.g. {months=1, days=5}
"""
return _has_fixed_frequency(freq) and _has_non_fixed_frequency(freq)
def _offset_to_nanoseconds_lower_bound(offset: DateOffset) -> int:
"""Given a DateOffset, which can consist of either fixed frequency or
non-fixed frequency offset, convert to the smallest possible fixed
frequency offset based in nanoseconds.
    Specifically, the smallest fixed frequency conversion for {months=1}
    is 28 * nanoseconds_per_day, because 1 month contains at least 28 days.
    Similarly, the smallest fixed frequency conversion for {years=1} is
    365 * nanoseconds_per_day.
This utility is used to compute the upper bound of the count of timestamps
given a range of datetime and an offset.
"""
nanoseconds_per_day = 24 * 60 * 60 * 10**9
kwds = offset.kwds
return (
kwds.get("years", 0) * (365 * nanoseconds_per_day)
+ kwds.get("months", 0) * (28 * nanoseconds_per_day)
+ kwds.get("weeks", 0) * (7 * nanoseconds_per_day)
+ kwds.get("days", 0) * nanoseconds_per_day
+ kwds.get("hours", 0) * 3600 * 10**9
+ kwds.get("minutes", 0) * 60 * 10**9
+ kwds.get("seconds", 0) * 10**9
+ kwds.get("milliseconds", 0) * 10**6
+ kwds.get("microseconds", 0) * 10**3
+ kwds.get("nanoseconds", 0)
)
def _to_iso_calendar(arg):
formats = ["%G", "%V", "%u"]
if not isinstance(arg, (cudf.Index, cudf.core.series.DatetimeProperties)):
raise AttributeError(
"Can only use .isocalendar accessor with series or index"
)
if isinstance(arg, cudf.Index):
iso_params = [
arg._column.as_string_column(arg._values.dtype, fmt)
for fmt in formats
]
index = arg._column
elif isinstance(arg.series, cudf.Series):
iso_params = [arg.strftime(fmt) for fmt in formats]
index = arg.series.index
data = dict(zip(["year", "week", "day"], iso_params))
return cudf.DataFrame(data, index=index, dtype=np.int32)
rapidsai_public_repos/cudf/python/cudf/cudf/core/tools/numeric.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import warnings
import numpy as np
import pandas as pd
import cudf
from cudf import _lib as libcudf
from cudf._lib import strings as libstrings
from cudf.api.types import (
_is_non_decimal_numeric_dtype,
is_categorical_dtype,
is_datetime_dtype,
is_list_dtype,
is_string_dtype,
is_struct_dtype,
is_timedelta_dtype,
)
from cudf.core.column import as_column
from cudf.utils.dtypes import can_convert_to_column
def to_numeric(arg, errors="raise", downcast=None):
"""
Convert argument into numerical types.
Parameters
----------
arg : column-convertible
The object to convert to numeric types
    errors : {'raise', 'ignore', 'coerce'}, default 'raise'
Policy to handle errors during parsing.
        * 'raise' will notify the user of all errors encountered.
        * 'ignore' will skip errors and return ``arg``.
* 'coerce' will leave invalid values as nulls.
    downcast : {'integer', 'signed', 'unsigned', 'float'}, default None
        If set, will try to down-convert the datatype of the
        parsed results to the smallest possible type. For each `downcast`
type, this method will determine the smallest possible
dtype from the following sets:
* {'integer', 'signed'}: all integer types greater or equal to
`np.int8`
* {'unsigned'}: all unsigned types greater or equal to `np.uint8`
* {'float'}: all floating types greater or equal to `np.float32`
        Note that downcast behavior is decoupled from parsing. Errors
        encountered during downcast are raised regardless of the ``errors``
        parameter.
Returns
-------
Series or ndarray
        Depending on the input: if a Series is passed in, a Series is
        returned, otherwise an ndarray.
Notes
-----
An important difference from pandas is that this function does not accept
mixed numeric/non-numeric type sequences. For example ``[1, 'a']``.
A ``TypeError`` will be raised when such input is received, regardless of
``errors`` parameter.
Examples
--------
>>> s = cudf.Series(['1', '2.0', '3e3'])
>>> cudf.to_numeric(s)
0 1.0
1 2.0
2 3000.0
dtype: float64
>>> cudf.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 3000.0
dtype: float32
>>> cudf.to_numeric(s, downcast='signed')
0 1
1 2
2 3000
dtype: int16
>>> s = cudf.Series(['apple', '1.0', '3e3'])
>>> cudf.to_numeric(s, errors='ignore')
0 apple
1 1.0
2 3e3
dtype: object
>>> cudf.to_numeric(s, errors='coerce')
0 <NA>
1 1.0
2 3000.0
dtype: float64
"""
if errors not in {"raise", "ignore", "coerce"}:
raise ValueError("invalid error value specified")
if downcast not in {None, "integer", "signed", "unsigned", "float"}:
raise ValueError("invalid downcasting method provided")
if not can_convert_to_column(arg) or (
hasattr(arg, "ndim") and arg.ndim > 1
):
raise ValueError("arg must be column convertible")
col = as_column(arg)
dtype = col.dtype
if is_datetime_dtype(dtype) or is_timedelta_dtype(dtype):
col = col.as_numerical_column(cudf.dtype("int64"))
elif is_categorical_dtype(dtype):
cat_dtype = col.dtype.type
if _is_non_decimal_numeric_dtype(cat_dtype):
col = col.as_numerical_column(cat_dtype)
else:
try:
col = _convert_str_col(
col._get_decategorized_column(), errors, downcast
)
except ValueError as e:
if errors == "ignore":
return arg
else:
raise e
elif is_string_dtype(dtype):
try:
col = _convert_str_col(col, errors, downcast)
except ValueError as e:
if errors == "ignore":
return arg
else:
raise e
elif is_list_dtype(dtype) or is_struct_dtype(dtype):
raise ValueError("Input does not support nested datatypes")
elif _is_non_decimal_numeric_dtype(dtype):
pass
else:
raise ValueError("Unrecognized datatype")
# str->float conversion may require lower precision
if col.dtype == cudf.dtype("f"):
col = col.as_numerical_column("d")
if downcast:
if downcast == "float":
# we support only float32 & float64
type_set = [
cudf.dtype(np.float32).char,
cudf.dtype(np.float64).char,
]
elif downcast in ("integer", "signed"):
type_set = list(np.typecodes["Integer"])
elif downcast == "unsigned":
type_set = list(np.typecodes["UnsignedInteger"])
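        # The candidate type codes are ordered from narrowest to widest, so
        # this loop keeps the first (smallest) dtype that is no wider than
        # the current one and can hold the values safely.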
for t in type_set:
downcast_dtype = cudf.dtype(t)
if downcast_dtype.itemsize <= col.dtype.itemsize:
if col.can_cast_safely(downcast_dtype):
col = libcudf.unary.cast(col, downcast_dtype)
break
if isinstance(arg, (cudf.Series, pd.Series)):
return cudf.Series(col)
else:
if col.has_nulls():
# To match pandas, always return a floating type filled with nan.
col = col.astype(float).fillna(np.nan)
return col.values
def _convert_str_col(col, errors, _downcast=None):
"""
Converts a string column to numeric column
Converts to integer column if all strings are integer-like (isinteger.all)
Otherwise, converts to float column if all strings are float-like (
isfloat.all)
    If errors == 'coerce', fill non-numeric strings with null.
    Looks ahead to the ``downcast`` parameter: if the float may be cast to
    integer, then only process in the float32 pipeline.
Parameters
----------
col : The string column to convert, must be string dtype
errors : {'raise', 'ignore', 'coerce'}, same as ``to_numeric``
_downcast : Same as ``to_numeric``, see description for use
Returns
-------
Converted numeric column
"""
if not is_string_dtype(col):
raise TypeError("col must be string dtype.")
is_integer = libstrings.is_integer(col)
if is_integer.all():
return col.as_numerical_column(dtype=cudf.dtype("i8"))
col = _proc_inf_empty_strings(col)
is_float = libstrings.is_float(col)
if is_float.all():
if _downcast in {"unsigned", "signed", "integer"}:
warnings.warn(
UserWarning(
"Downcasting from float to int will be "
"limited by float32 precision."
)
)
return col.as_numerical_column(dtype=cudf.dtype("f"))
else:
return col.as_numerical_column(dtype=cudf.dtype("d"))
else:
if errors == "coerce":
col = libcudf.string_casting.stod(col)
non_numerics = is_float.unary_operator("not")
col[non_numerics] = None
return col
else:
raise ValueError("Unable to convert some strings to numerics.")
def _proc_inf_empty_strings(col):
"""Handles empty and infinity strings"""
col = libstrings.to_lower(col)
col = _proc_empty_strings(col)
col = _proc_inf_strings(col)
return col
def _proc_empty_strings(col):
"""Replaces empty strings with NaN"""
s = cudf.Series(col)
s = s.where(s != "", "NaN")
return s._column
def _proc_inf_strings(col):
"""Convert "inf/infinity" strings into "Inf", the native string
representing infinity in libcudf
"""
# TODO: This can be handled by libcudf in
# future see StringColumn.as_numerical_column
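    # For example, after the upstream lowercasing, "+inf", "inf" and
    # "infinity" all end up as "Inf" once the replacements below are applied.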
col = libstrings.replace_multi(
col,
as_column(["+", "inf", "inity"]),
as_column(["", "Inf", ""]),
)
return col
rapidsai_public_repos/cudf/python/cudf/cudf/core/groupby/groupby.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import copy
import itertools
import pickle
import textwrap
import warnings
from collections import abc
from functools import cached_property
from typing import Any, Iterable, List, Optional, Tuple, Union
import cupy as cp
import numpy as np
import pandas as pd
import cudf
from cudf import _lib as libcudf
from cudf._lib import groupby as libgroupby
from cudf._lib.null_mask import bitmask_or
from cudf._lib.reshape import interleave_columns
from cudf._lib.sort import segmented_sort_by_key
from cudf._lib.types import size_type_dtype
from cudf._typing import AggType, DataFrameOrSeries, MultiColumnAggType
from cudf.api.types import is_bool_dtype, is_float_dtype, is_list_like
from cudf.core.abc import Serializable
from cudf.core.column.column import ColumnBase, arange, as_column
from cudf.core.column_accessor import ColumnAccessor
from cudf.core.join._join_helpers import _match_join_keys
from cudf.core.mixins import Reducible, Scannable
from cudf.core.multiindex import MultiIndex
from cudf.core.udf.groupby_utils import _can_be_jitted, jit_groupby_apply
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.utils import GetAttrGetItemMixin
# The three functions below return the quantiles [25%, 50%, 75%]
# respectively, which are called in the describe() method to output
# the summary stats of a GroupBy object
def _quantile_25(x):
return x.quantile(0.25)
def _quantile_50(x):
return x.quantile(0.50)
def _quantile_75(x):
return x.quantile(0.75)
def _is_row_of(chunk, obj):
return (
isinstance(chunk, cudf.Series)
and isinstance(obj, cudf.DataFrame)
and len(chunk.index) == len(obj._column_names)
and (chunk.index.to_pandas() == pd.Index(obj._column_names)).all()
)
groupby_doc_template = textwrap.dedent(
"""Group using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the object,
applying a function, and combining the results. This can be used to
group large amounts of data and compute operations on these groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby. If by is a
function, it's called on each value of the object's index.
If a dict or Series is passed, the Series or dict VALUES will
be used to determine the groups (the Series' values are first
        aligned; see .align() method). If a cupy array is passed, the
        values are used as-is to determine the groups. A label or list
of labels may be passed to group by the columns in self.
Notice that a tuple is interpreted as a (single) key.
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as
the index. Only relevant for DataFrame input.
as_index=False is effectively "SQL-style" grouped output.
sort : bool, default False
        Sort result by group key. Unlike pandas, cudf defaults to
``False`` for better performance. Note this does not influence
the order of observations within each group. Groupby preserves
the order of rows within each group.
group_keys : bool, optional
When calling apply and the ``by`` argument produces a like-indexed
result, add group keys to index to identify pieces. By default group
keys are not included when the result's index (and column) labels match
the inputs, and are included otherwise. This argument has no effect if
the result produced is not like-indexed with respect to the input.
{ret}
Examples
--------
**Series**
>>> ser = cudf.Series([390., 350., 30., 20.],
... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... name="Max Speed")
>>> ser
Falcon 390.0
Falcon 350.0
Parrot 30.0
Parrot 20.0
Name: Max Speed, dtype: float64
>>> ser.groupby(level=0).mean()
Falcon 370.0
Parrot 25.0
Name: Max Speed, dtype: float64
>>> ser.groupby(ser > 100).mean()
Max Speed
False 25.0
True 370.0
Name: Max Speed, dtype: float64
**DataFrame**
>>> import cudf
>>> import pandas as pd
>>> df = cudf.DataFrame({{
... 'Animal': ['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.],
... }})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = cudf.DataFrame({{'Max Speed': [390., 350., 30., 20.]}},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Wild 185.0
Captive 210.0
>>> df = cudf.DataFrame({{'A': 'a a b'.split(),
... 'B': [1,2,3],
... 'C': [4,6,5]}})
>>> g1 = df.groupby('A', group_keys=False)
>>> g2 = df.groupby('A', group_keys=True)
    Notice that ``g1`` and ``g2`` have two groups, ``a`` and ``b``, and only
differ in their ``group_keys`` argument. Calling `apply` in various ways,
we can get different grouping results:
>>> g1[['B', 'C']].apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
In the above, the groups are not part of the index. We can have them included
by using ``g2`` where ``group_keys=True``:
>>> g2[['B', 'C']].apply(lambda x: x / x.sum())
B C
A
a 0 0.333333 0.4
1 0.666667 0.6
b 2 1.000000 1.0
"""
)
class GroupBy(Serializable, Reducible, Scannable):
obj: "cudf.core.indexed_frame.IndexedFrame"
_VALID_REDUCTIONS = {
"sum",
"prod",
"idxmin",
"idxmax",
"min",
"max",
"mean",
"median",
"nunique",
"first",
"last",
"var",
"std",
}
_VALID_SCANS = {
"cumsum",
"cummin",
"cummax",
}
# Necessary because the function names don't directly map to the docs.
_SCAN_DOCSTRINGS = {
"cumsum": {"op_name": "Cumulative sum"},
"cummin": {"op_name": "Cumulative min"},
"cummax": {"op_name": "Cumulative max"},
}
_MAX_GROUPS_BEFORE_WARN = 100
def __init__(
self,
obj,
by=None,
level=None,
sort=False,
as_index=True,
dropna=True,
group_keys=True,
):
"""
Group a DataFrame or Series by a set of columns.
Parameters
----------
by : optional
Specifies the grouping columns. Can be any of the following:
- A Python function called on each value of the object's index
- A dict or Series that maps index labels to group names
- A cudf.Index object
- A str indicating a column name
- An array of the same length as the object
- A Grouper object
- A list of the above
level : int, level_name or list, optional
For objects with a MultiIndex, `level` can be used to specify
grouping by one or more levels of the MultiIndex.
sort : bool, default False
            Sort the result by group keys. Unlike pandas, cudf defaults
to False for better performance.
as_index : bool, optional
If as_index=True (default), the group names appear
as the keys of the resulting DataFrame.
If as_index=False, the groups are returned as ordinary
columns of the resulting DataFrame, *if they are named columns*.
dropna : bool, optional
If True (default), do not include the "null" group.
"""
self.obj = obj
self._as_index = as_index
self._by = by.copy(deep=True) if isinstance(by, _Grouping) else by
self._level = level
self._sort = sort
self._dropna = dropna
self._group_keys = group_keys
if isinstance(self._by, _Grouping):
self._by._obj = self.obj
self.grouping = self._by
else:
self.grouping = _Grouping(obj, self._by, level)
def __iter__(self):
if isinstance(self._by, list) and len(self._by) == 1:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"In a future version of cudf, a length 1 tuple will be "
"returned when iterating over a groupby with a grouper equal "
"to a list of length 1. To avoid this warning, do not supply "
"a list with a single grouper.",
FutureWarning,
)
group_names, offsets, _, grouped_values = self._grouped()
if isinstance(group_names, cudf.BaseIndex):
group_names = group_names.to_pandas()
for i, name in enumerate(group_names):
yield name, grouped_values[offsets[i] : offsets[i + 1]]
@property
def dtypes(self):
"""
Return the dtypes in this group.
Returns
-------
pandas.DataFrame
The data type of each column of the group.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 3], 'b': ['x', 'y', 'z', 'a'],
... 'c':[10, 11, 12, 12]})
>>> df.groupby("a").dtypes
b c
a
1 object int64
2 object int64
3 object int64
"""
index = self.grouping.keys.unique().sort_values().to_pandas()
obj_dtypes = self.obj._dtypes
return pd.DataFrame(
{
name: [obj_dtypes[name]] * len(index)
for name in self.grouping.values._column_names
},
index=index,
)
@cached_property
def groups(self):
"""
Returns a dictionary mapping group keys to row labels.
"""
group_names, offsets, _, grouped_values = self._grouped()
grouped_index = grouped_values.index
if len(group_names) > self._MAX_GROUPS_BEFORE_WARN:
warnings.warn(
f"GroupBy.groups() performance scales poorly with "
f"number of groups. Got {len(group_names)} groups."
)
return dict(
zip(group_names.to_pandas(), grouped_index._split(offsets[1:-1]))
)
@_cudf_nvtx_annotate
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]})
>>> df
X Y
0 A 1
1 B 4
2 A 3
3 B 2
>>> df.groupby("X").get_group("A")
X Y
0 A 1
2 A 3
"""
if obj is None:
obj = self.obj
return obj.loc[self.groups[name]]
@_cudf_nvtx_annotate
def size(self):
"""
Return the size of each group.
"""
return (
cudf.Series(
cudf.core.column.column_empty(
len(self.obj), "int8", masked=False
)
)
.groupby(self.grouping, sort=self._sort, dropna=self._dropna)
.agg("size")
)
@_cudf_nvtx_annotate
def cumcount(self):
"""
Return the cumulative count of keys in each group.
"""
return (
cudf.Series(
cudf.core.column.column_empty(
len(self.obj), "int8", masked=False
),
index=self.obj.index,
)
.groupby(self.grouping, sort=self._sort)
.agg("cumcount")
)
@_cudf_nvtx_annotate
def rank(
self,
method="average",
ascending=True,
na_option="keep",
pct=False,
axis=0,
):
"""
Return the rank of values within each group.
"""
if not axis == 0:
raise NotImplementedError("Only axis=0 is supported.")
if na_option not in {"keep", "top", "bottom"}:
raise ValueError(
f"na_option must be one of 'keep', 'top', or 'bottom', "
f"but got {na_option}"
)
# TODO: in pandas compatibility mode, we should convert any
# NaNs to nulls in any float value columns, as Pandas
# treats NaNs the way we treat nulls.
if cudf.get_option("mode.pandas_compatible"):
if any(
is_float_dtype(typ)
for typ in self.grouping.values._dtypes.values()
):
raise NotImplementedError(
"NaNs are not supported in groupby.rank."
)
def rank(x):
return getattr(x, "rank")(
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
result = self.agg(rank)
if cudf.get_option("mode.pandas_compatible"):
# pandas always returns floats:
return result.astype("float64")
return result
@cached_property
def _groupby(self):
return libgroupby.GroupBy(
[*self.grouping.keys._columns], dropna=self._dropna
)
@_cudf_nvtx_annotate
def agg(self, func):
"""
Apply aggregation(s) to the groups.
Parameters
----------
func : str, callable, list or dict
Argument specifying the aggregation(s) to perform on the
groups. `func` can be any of the following:
- string: the name of a supported aggregation
- callable: a function that accepts a Series/DataFrame and
performs a supported operation on it.
- list: a list of strings/callables specifying the
aggregations to perform on every column.
- dict: a mapping of column names to string/callable
specifying the aggregations to perform on those
columns.
See :ref:`the user guide <basics.groupby>` for supported
aggregations.
Returns
-------
A Series or DataFrame containing the combined results of the
aggregation(s).
Examples
--------
>>> import cudf
>>> a = cudf.DataFrame({
... 'a': [1, 1, 2],
... 'b': [1, 2, 3],
... 'c': [2, 2, 1]
... })
>>> a.groupby('a').agg('sum')
b c
a
2 3 1
1 3 4
Specifying a list of aggregations to perform on each column.
>>> import cudf
>>> a = cudf.DataFrame({
... 'a': [1, 1, 2],
... 'b': [1, 2, 3],
... 'c': [2, 2, 1]
... })
>>> a.groupby('a').agg(['sum', 'min'])
b c
sum min sum min
a
2 3 3 1 1
1 3 1 4 2
Using a dict to specify aggregations to perform per column.
>>> import cudf
>>> a = cudf.DataFrame({
... 'a': [1, 1, 2],
... 'b': [1, 2, 3],
... 'c': [2, 2, 1]
... })
>>> a.groupby('a').agg({'a': 'max', 'b': ['min', 'mean']})
a b
max min mean
a
2 2 3 3.0
1 1 1 1.5
Using lambdas/callables to specify aggregations taking parameters.
>>> import cudf
>>> a = cudf.DataFrame({
... 'a': [1, 1, 2],
... 'b': [1, 2, 3],
... 'c': [2, 2, 1]
... })
>>> f1 = lambda x: x.quantile(0.5); f1.__name__ = "q0.5"
>>> f2 = lambda x: x.quantile(0.75); f2.__name__ = "q0.75"
>>> a.groupby('a').agg([f1, f2])
b c
q0.5 q0.75 q0.5 q0.75
a
1 1.5 1.75 2.0 2.0
2 3.0 3.00 1.0 1.0
"""
column_names, columns, normalized_aggs = self._normalize_aggs(func)
orig_dtypes = tuple(c.dtype for c in columns)
# Note: When there are no key columns, the below produces
# a Float64Index, while Pandas returns an Int64Index
# (GH: 6945)
(
result_columns,
grouped_key_cols,
included_aggregations,
) = self._groupby.aggregate(columns, normalized_aggs)
result_index = self.grouping.keys._from_columns_like_self(
grouped_key_cols,
)
multilevel = _is_multi_agg(func)
data = {}
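        # Assemble the output columns. When multiple aggregations are
        # requested per column, keys are (column_name, agg_name) tuples that
        # later become a MultiIndex on the result's columns.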
for col_name, aggs, cols, orig_dtype in zip(
column_names,
included_aggregations,
result_columns,
orig_dtypes,
):
for agg_tuple, col in zip(aggs, cols):
agg, agg_kind = agg_tuple
agg_name = agg.__name__ if callable(agg) else agg
if multilevel:
key = (col_name, agg_name)
else:
key = col_name
if (
agg in {list, "collect"}
and orig_dtype != col.dtype.element_type
):
# Structs lose their labels which we reconstruct here
col = col._with_type_metadata(cudf.ListDtype(orig_dtype))
if agg_kind in {"COUNT", "SIZE"}:
data[key] = col.astype("int64")
elif (
self.obj.empty
and (
isinstance(agg_name, str)
and agg_name in Reducible._SUPPORTED_REDUCTIONS
)
and len(col) == 0
and not isinstance(
col,
(
cudf.core.column.ListColumn,
cudf.core.column.StructColumn,
cudf.core.column.DecimalBaseColumn,
),
)
):
data[key] = col.astype(orig_dtype)
else:
data[key] = col
data = ColumnAccessor(data, multiindex=multilevel)
if not multilevel:
data = data.rename_levels({np.nan: None}, level=0)
result = cudf.DataFrame._from_data(data, index=result_index)
if self._sort:
result = result.sort_index()
else:
if cudf.get_option(
"mode.pandas_compatible"
) and not libgroupby._is_all_scan_aggregate(normalized_aggs):
# Even with `sort=False`, pandas guarantees that
# groupby preserves the order of rows within each group.
left_cols = list(
self.grouping.keys.drop_duplicates()._data.columns
)
right_cols = list(result_index._data.columns)
join_keys = [
_match_join_keys(lcol, rcol, "left")
for lcol, rcol in zip(left_cols, right_cols)
]
# TODO: In future, see if we can centralize
# logic else where that has similar patterns.
join_keys = map(list, zip(*join_keys))
_, indices = libcudf.join.join(
*join_keys,
how="left",
)
result = result.take(indices)
if isinstance(result._index, cudf.CategoricalIndex):
# Needs re-ordering the categories in the order
# they are after grouping.
result._index = cudf.Index(
result._index._column.reorder_categories(
result._index._column._get_decategorized_column()
),
name=result._index.name,
)
if not self._as_index:
result = result.reset_index()
if libgroupby._is_all_scan_aggregate(normalized_aggs):
# Scan aggregations return rows in original index order
return self._mimic_pandas_order(result)
return result
def _reduce(
self,
op: str,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
"""Compute {op} of group values.
Parameters
----------
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to
use everything, then use only numeric data.
min_count : int, default 0
The required number of valid values to perform the operation. If
fewer than ``min_count`` non-NA values are present the result will
be NA.
Returns
-------
Series or DataFrame
Computed {op} of values within each group.
Notes
-----
Difference from pandas:
* Not supporting: numeric_only, min_count
"""
if numeric_only:
raise NotImplementedError(
"numeric_only parameter is not implemented yet"
)
if min_count != 0:
raise NotImplementedError(
"min_count parameter is not implemented yet"
)
return self.agg(op)
def _scan(self, op: str, *args, **kwargs):
"""{op_name} for each group."""
return self.agg(op)
aggregate = agg
def _head_tail(self, n, *, take_head: bool, preserve_order: bool):
"""Return the head or tail of each group
Parameters
----------
n
Number of entries to include (if negative, number of
entries to exclude)
take_head
Do we want the head or the tail of the group
preserve_order
If True, return the n rows from each group in original
dataframe order (this mimics pandas behavior though is
more expensive).
Returns
-------
New DataFrame or Series
Notes
-----
Unlike pandas, this returns an object in group order, not
original order, unless ``preserve_order`` is ``True``.
"""
# A more memory-efficient implementation would merge the take
# into the grouping, but that probably requires a new
# aggregation scheme in libcudf. This is probably "fast
# enough" for most reasonable input sizes.
_, offsets, _, group_values = self._grouped()
group_offsets = np.asarray(offsets, dtype=size_type_dtype)
size_per_group = np.diff(group_offsets)
# "Out of bounds" n for the group size either means no entries
# (negative) or all the entries (positive)
if n < 0:
size_per_group = np.maximum(
size_per_group + n, 0, out=size_per_group
)
else:
size_per_group = np.minimum(size_per_group, n, out=size_per_group)
if take_head:
group_offsets = group_offsets[:-1]
else:
group_offsets = group_offsets[1:] - size_per_group
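        # Build a gather map into the grouped frame: enumerate the rows we
        # keep per group (0..size-1), then shift each group's slot numbers
        # by that group's starting offset.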
to_take = np.arange(size_per_group.sum(), dtype=size_type_dtype)
fixup = np.empty_like(size_per_group)
fixup[0] = 0
np.cumsum(size_per_group[:-1], out=fixup[1:])
to_take += np.repeat(group_offsets - fixup, size_per_group)
to_take = as_column(to_take)
result = group_values.iloc[to_take]
if preserve_order:
# Can't use _mimic_pandas_order because we need to
# subsample the gather map from the full input ordering,
# rather than permuting the gather map of the output.
_, (ordering,), _ = self._groupby.groups(
[arange(0, len(self.obj))]
)
# Invert permutation from original order to groups on the
# subset of entries we want.
gather_map = ordering.take(to_take).argsort()
return result.take(gather_map)
else:
return result
@_cudf_nvtx_annotate
def head(self, n: int = 5, *, preserve_order: bool = True):
"""Return first n rows of each group
Parameters
----------
n
If positive: number of entries to include from start of group
If negative: number of entries to exclude from end of group
preserve_order
If True (default), return the n rows from each group in
original dataframe order (this mimics pandas behavior
though is more expensive). If you don't need rows in
original dataframe order you will see a performance
improvement by setting ``preserve_order=False``. In both
cases, the original index is preserved, so ``.loc``-based
indexing will work identically.
Returns
-------
Series or DataFrame
Subset of the original grouped object as determined by n
See Also
--------
.tail
Examples
--------
>>> df = cudf.DataFrame(
... {
... "a": [1, 0, 1, 2, 2, 1, 3, 2, 3, 3, 3],
... "b": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
... }
... )
>>> df.groupby("a").head(1)
a b
0 1 0
1 0 1
3 2 3
6 3 6
>>> df.groupby("a").head(-2)
a b
0 1 0
3 2 3
6 3 6
8 3 8
"""
return self._head_tail(
n, take_head=True, preserve_order=preserve_order
)
@_cudf_nvtx_annotate
def tail(self, n: int = 5, *, preserve_order: bool = True):
"""Return last n rows of each group
Parameters
----------
n
If positive: number of entries to include from end of group
If negative: number of entries to exclude from start of group
preserve_order
If True (default), return the n rows from each group in
original dataframe order (this mimics pandas behavior
though is more expensive). If you don't need rows in
original dataframe order you will see a performance
improvement by setting ``preserve_order=False``. In both
cases, the original index is preserved, so ``.loc``-based
indexing will work identically.
Returns
-------
Series or DataFrame
Subset of the original grouped object as determined by n
See Also
--------
.head
Examples
--------
>>> df = cudf.DataFrame(
... {
... "a": [1, 0, 1, 2, 2, 1, 3, 2, 3, 3, 3],
... "b": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
... }
... )
>>> df.groupby("a").tail(1)
a b
1 0 1
5 1 5
7 2 7
10 3 10
>>> df.groupby("a").tail(-2)
a b
5 1 5
7 2 7
9 3 9
10 3 10
"""
return self._head_tail(
n, take_head=False, preserve_order=preserve_order
)
@_cudf_nvtx_annotate
def nth(self, n):
"""
Return the nth row from each group.
"""
result = self.agg(lambda x: x.nth(n)).sort_index()
sizes = self.size().sort_index()
return result[sizes > n]
@_cudf_nvtx_annotate
def ngroup(self, ascending=True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> df = cudf.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
"""
index = self.grouping.keys.unique().sort_values()
num_groups = len(index)
_, has_null_group = bitmask_or([*index._columns])
if ascending:
# Count ascending from 0 to num_groups - 1
group_ids = cudf.Series._from_data({None: cp.arange(num_groups)})
elif has_null_group:
# Count descending from num_groups - 1 to 0, but subtract one more
# for the null group making it num_groups - 2 to -1.
group_ids = cudf.Series._from_data(
{None: cp.arange(num_groups - 2, -2, -1)}
)
else:
# Count descending from num_groups - 1 to 0
group_ids = cudf.Series._from_data(
{None: cp.arange(num_groups - 1, -1, -1)}
)
if has_null_group:
group_ids.iloc[-1] = cudf.NA
group_ids._index = index
return self._broadcast(group_ids)
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
weights: Union[abc.Sequence, "cudf.Series", None] = None,
random_state: Union[np.random.RandomState, int, None] = None,
):
"""Return a random sample of items in each group.
Parameters
----------
n
            Number of items to return for each group; if sampling
            without replacement, this must be at most the size of the
smallest group. Cannot be used with frac. Default is
``n=1`` if frac is None.
frac
Fraction of items to return. Cannot be used with n.
replace
Should sampling occur with or without replacement?
weights
Sampling probability for each element. Must be the same
length as the grouped frame. Not currently supported.
random_state
Seed for random number generation.
Returns
-------
New dataframe or series with samples of appropriate size drawn
from each group.
"""
if weights is not None:
# To implement this case again needs different algorithms
# in both cases.
#
# Without replacement, use the weighted reservoir sampling
            # approach of Efraimidis and Spirakis (2006)
# https://doi.org/10.1016/j.ipl.2005.11.003, essentially,
# do a segmented argsort sorting on weight-scaled
# logarithmic deviates. See
# https://timvieira.github.io/blog/post/
# 2019/09/16/algorithms-for-sampling-without-replacement/
#
# With replacement is trickier, one might be able to use
# the alias method, otherwise we're back to bucketed
# rejection sampling.
raise NotImplementedError("Sampling with weights is not supported")
if frac is not None and n is not None:
raise ValueError("Cannot supply both of frac and n")
elif n is None and frac is None:
n = 1
elif frac is not None and not (0 <= frac <= 1):
raise ValueError(
"Sampling with fraction must provide fraction in "
f"[0, 1], got {frac=}"
)
# TODO: handle random states properly.
if random_state is not None and not isinstance(random_state, int):
raise NotImplementedError(
"Only integer seeds are supported for random_state "
"in this case"
)
# Get the groups
# TODO: convince Cython to convert the std::vector offsets
# into a numpy array directly, rather than a list.
# TODO: this uses the sort-based groupby, could one use hash-based?
_, offsets, _, group_values = self._grouped()
group_offsets = np.asarray(offsets, dtype=size_type_dtype)
size_per_group = np.diff(group_offsets)
if n is not None:
samples_per_group = np.broadcast_to(
size_type_dtype.type(n), size_per_group.shape
)
if not replace and (minsize := size_per_group.min()) < n:
raise ValueError(
f"Cannot sample {n=} without replacement, "
f"smallest group is {minsize}"
)
else:
# Pandas uses round-to-nearest, ties to even to
# pick sample sizes for the fractional case (unlike IEEE
# which is round-to-nearest, ties to sgn(x) * inf).
samples_per_group = np.round(
size_per_group * frac, decimals=0
).astype(size_type_dtype)
if replace:
# We would prefer to use cupy here, but their rng.integers
# interface doesn't take array-based low and high
# arguments.
low = 0
high = np.repeat(size_per_group, samples_per_group)
rng = np.random.default_rng(seed=random_state)
indices = rng.integers(low, high, dtype=size_type_dtype)
indices += np.repeat(group_offsets[:-1], samples_per_group)
else:
# Approach: do a segmented argsort of the index array and take
# the first samples_per_group entries from sorted array.
# We will shuffle the group indices and then pick them out
# from the grouped dataframe index.
nrows = len(group_values)
indices = cp.arange(nrows, dtype=size_type_dtype)
if len(size_per_group) < 500:
# Empirically shuffling with cupy is faster at this scale
rs = cp.random.get_random_state()
rs.seed(seed=random_state)
for off, size in zip(group_offsets, size_per_group):
rs.shuffle(indices[off : off + size])
else:
rng = cp.random.default_rng(seed=random_state)
(indices,) = segmented_sort_by_key(
[as_column(indices)],
[as_column(rng.random(size=nrows))],
as_column(group_offsets),
[],
[],
stable=True,
)
indices = cp.asarray(indices.data_array_view(mode="read"))
# Which indices are we going to want?
want = np.arange(samples_per_group.sum(), dtype=size_type_dtype)
scan = np.empty_like(samples_per_group)
scan[0] = 0
np.cumsum(samples_per_group[:-1], out=scan[1:])
want += np.repeat(group_offsets[:-1] - scan, samples_per_group)
indices = indices[want]
return group_values.iloc[indices]
def serialize(self):
header = {}
frames = []
header["kwargs"] = {
"sort": self._sort,
"dropna": self._dropna,
"as_index": self._as_index,
}
obj_header, obj_frames = self.obj.serialize()
header["obj"] = obj_header
header["obj_type"] = pickle.dumps(type(self.obj))
header["num_obj_frames"] = len(obj_frames)
frames.extend(obj_frames)
grouping_header, grouping_frames = self.grouping.serialize()
header["grouping"] = grouping_header
header["num_grouping_frames"] = len(grouping_frames)
frames.extend(grouping_frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
kwargs = header["kwargs"]
obj_type = pickle.loads(header["obj_type"])
obj = obj_type.deserialize(
header["obj"], frames[: header["num_obj_frames"]]
)
grouping = _Grouping.deserialize(
header["grouping"], frames[header["num_obj_frames"] :]
)
return cls(obj, grouping, **kwargs)
def _grouped(self):
grouped_key_cols, grouped_value_cols, offsets = self._groupby.groups(
[*self.obj._index._columns, *self.obj._columns]
)
grouped_keys = cudf.core.index._index_from_columns(grouped_key_cols)
if isinstance(self.grouping.keys, cudf.MultiIndex):
grouped_keys.names = self.grouping.keys.names
else:
grouped_keys.name = self.grouping.keys.name
grouped_values = self.obj._from_columns_like_self(
grouped_value_cols,
column_names=self.obj._column_names,
index_names=self.obj._index_names,
)
group_names = grouped_keys.unique().sort_values()
return (group_names, offsets, grouped_keys, grouped_values)
def _normalize_aggs(
self, aggs: MultiColumnAggType
) -> Tuple[Iterable[Any], Tuple[ColumnBase, ...], List[List[AggType]]]:
"""
Normalize aggs to a list of list of aggregations, where `out[i]`
is a list of aggregations for column `self.obj[i]`. We support three
        different forms of `aggs` input here:
- A single agg, such as "sum". This agg is applied to all value
columns.
- A list of aggs, such as ["sum", "mean"]. All aggs are applied to all
value columns.
- A mapping of column name to aggs, such as
{"a": ["sum"], "b": ["mean"]}, the aggs are applied to specified
column.
Each agg can be string or lambda functions.
"""
aggs_per_column: Iterable[Union[AggType, Iterable[AggType]]]
if isinstance(aggs, dict):
column_names, aggs_per_column = aggs.keys(), aggs.values()
columns = tuple(self.obj._data[col] for col in column_names)
else:
values = self.grouping.values
column_names = values._column_names
columns = values._columns
aggs_per_column = (aggs,) * len(columns)
# is_list_like performs type narrowing but type-checkers don't
# know it. One could add a TypeGuard annotation to
# is_list_like (see PEP647), but that is less useful than it
# seems because unlike the builtin narrowings it only performs
# narrowing in the positive case.
normalized_aggs = [
list(agg) if is_list_like(agg) else [agg] # type: ignore
for agg in aggs_per_column
]
return column_names, columns, normalized_aggs
@_cudf_nvtx_annotate
def pipe(self, func, *args, **kwargs):
"""
Apply a function `func` with arguments to this GroupBy
object and return the function's result.
Parameters
----------
func : function
Function to apply to this GroupBy object or,
alternatively, a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the GroupBy object.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
cudf.Series.pipe
Apply a function with arguments to a series.
cudf.DataFrame.pipe
Apply a function with arguments to a dataframe.
apply
Apply function to each group instead of to the full GroupBy object.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'A': ['a', 'b', 'a', 'b'], 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
        To get the difference between each group's maximum and minimum value
in one pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2
"""
return cudf.core.common.pipe(self, func, *args, **kwargs)
@_cudf_nvtx_annotate
def _jit_groupby_apply(
self, function, group_names, offsets, group_keys, grouped_values, *args
):
# Nulls are not yet supported
if self.grouping._obj._has_nulls:
raise ValueError("Nulls not yet supported with groupby JIT engine")
chunk_results = jit_groupby_apply(
offsets, grouped_values, function, *args
)
result = cudf.Series._from_data(
{None: chunk_results}, index=group_names
)
result.index.names = self.grouping.names
return result
@_cudf_nvtx_annotate
def _iterative_groupby_apply(
self, function, group_names, offsets, group_keys, grouped_values, *args
):
ngroups = len(offsets) - 1
if ngroups > self._MAX_GROUPS_BEFORE_WARN:
warnings.warn(
f"GroupBy.apply() performance scales poorly with "
f"number of groups. Got {ngroups} groups. Some functions "
"may perform better by passing engine='jit'",
RuntimeWarning,
)
chunks = [
grouped_values[s:e] for s, e in zip(offsets[:-1], offsets[1:])
]
chunk_results = [function(chk, *args) for chk in chunks]
return self._post_process_chunk_results(
chunk_results, group_names, group_keys, grouped_values
)
def _post_process_chunk_results(
self, chunk_results, group_names, group_keys, grouped_values
):
if not len(chunk_results):
return self.obj.head(0)
if cudf.api.types.is_scalar(chunk_results[0]):
result = cudf.Series._from_data(
{None: chunk_results}, index=group_names
)
result.index.names = self.grouping.names
return result
elif isinstance(chunk_results[0], cudf.Series) and isinstance(
self.obj, cudf.DataFrame
):
# When the UDF is like df.sum(), the result for each
# group is a row-like "Series" where the index labels
# are the same as the original calling DataFrame
if _is_row_of(chunk_results[0], self.obj):
result = cudf.concat(chunk_results, axis=1).T
result.index = group_names
result.index.names = self.grouping.names
# When the UDF is like df.x + df.y, the result for each
# group is the same length as the original group
elif len(self.obj) == sum(len(chk) for chk in chunk_results):
result = cudf.concat(chunk_results)
index_data = group_keys._data.copy(deep=True)
index_data[None] = grouped_values.index._column
result.index = cudf.MultiIndex._from_data(index_data)
else:
raise TypeError(
"Error handling Groupby apply output with input of "
f"type {type(self.obj)} and output of "
f"type {type(chunk_results[0])}"
)
else:
result = cudf.concat(chunk_results)
if self._group_keys:
index_data = group_keys._data.copy(deep=True)
index_data[None] = grouped_values.index._column
result.index = cudf.MultiIndex._from_data(index_data)
return result
@_cudf_nvtx_annotate
def apply(self, function, *args, engine="auto"):
"""Apply a python transformation function over the grouped chunk.
Parameters
----------
function : callable
The python transformation function that will be applied
on the grouped chunk.
args : tuple
Optional positional arguments to pass to the function.
engine: 'auto', 'cudf', or 'jit', default 'auto'
Selects the GroupBy.apply implementation. Use `jit` to
select the numba JIT pipeline. Only certain operations are allowed
within the function when using this option: min, max, sum, mean, var,
std, idxmax, and idxmin and any arithmetic formula involving them are
allowed. Binary operations are not yet supported, so syntax like
`df['x'] * 2` is not yet allowed.
For more information, see the `cuDF guide to user defined functions
<https://docs.rapids.ai/api/cudf/stable/user_guide/guide-to-udfs.html>`__.
Use `cudf` to select the iterative groupby apply algorithm which aims
to provide maximum flexibility at the expense of performance.
The default value `auto` will attempt to use the numba JIT pipeline
where possible and will fall back to the iterative algorithm if
necessary.
Examples
--------
.. code-block:: python
from cudf import DataFrame
df = DataFrame()
df['key'] = [0, 0, 1, 1, 2, 2, 2]
df['val'] = [0, 1, 2, 3, 4, 5, 6]
groups = df.groupby(['key'])
# Define a function to apply to each row in a group
def mult(df):
df['out'] = df['key'] * df['val']
return df
result = groups.apply(mult)
print(result)
Output:
.. code-block:: python
key val out
0 0 0 0
1 0 1 0
2 1 2 2
3 1 3 3
4 2 4 8
5 2 5 10
6 2 6 12
.. pandas-compat::
**groupby.apply**
cuDF's ``groupby.apply`` is limited compared to pandas.
In some situations, Pandas returns the grouped keys as part of
the index while cudf does not due to redundancy. For example:
.. code-block::
>>> import pandas as pd
>>> df = pd.DataFrame({
... 'a': [1, 1, 2, 2],
... 'b': [1, 2, 1, 2],
... 'c': [1, 2, 3, 4],
... })
>>> gdf = cudf.from_pandas(df)
>>> df.groupby('a').apply(lambda x: x.iloc[[0]])
a b c
a
1 0 1 1 1
2 2 2 1 3
>>> gdf.groupby('a').apply(lambda x: x.iloc[[0]])
a b c
0 1 1 1
2 2 1 3
``engine='jit'`` may be used to accelerate certain functions,
initially those that contain reductions and arithmetic operations
between results of those reductions:
>>> import cudf
>>> df = cudf.DataFrame({'a':[1,1,2,2,3,3], 'b':[1,2,3,4,5,6]})
>>> df.groupby('a').apply(
... lambda group: group['b'].max() - group['b'].min(),
... engine='jit'
... )
a
1 1
2 1
3 1
dtype: int64
"""
if self.obj.empty:
res = self.obj.copy(deep=True)
res.index = self.grouping.keys
if function in {"sum", "product"}:
# For `sum` & `product`, boolean types
# will need to result in `int64` type.
for name, col in res._data.items():
if is_bool_dtype(col.dtype):
res._data[name] = col.astype("int")
return res
if not callable(function):
raise TypeError(f"type {type(function)} is not callable")
group_names, offsets, group_keys, grouped_values = self._grouped()
if engine == "auto":
if (not grouped_values._has_nulls) and _can_be_jitted(
grouped_values, function, args
):
engine = "jit"
else:
engine = "cudf"
if engine == "jit":
result = self._jit_groupby_apply(
function,
group_names,
offsets,
group_keys,
grouped_values,
*args,
)
elif engine == "cudf":
result = self._iterative_groupby_apply(
function,
group_names,
offsets,
group_keys,
grouped_values,
*args,
)
else:
raise ValueError(f"Unsupported engine '{engine}'")
if self._sort:
result = result.sort_index()
if self._as_index is False:
result = result.reset_index()
result[None] = result.pop(0)
return result
@_cudf_nvtx_annotate
def apply_grouped(self, function, **kwargs):
"""Apply a transformation function over the grouped chunk.
This uses numba's CUDA JIT compiler to convert the Python
transformation function into a CUDA kernel, thus will have a
compilation overhead during the first run.
Parameters
----------
func : function
The transformation function that will be executed on the CUDA GPU.
incols: list
A list of names of input columns.
outcols: list
A dictionary of output column names and their dtype.
kwargs : dict
name-value of extra arguments. These values are passed directly into
the function.
Examples
--------
.. code-block:: python
from cudf import DataFrame
from numba import cuda
import numpy as np
df = DataFrame()
df['key'] = [0, 0, 1, 1, 2, 2, 2]
df['val'] = [0, 1, 2, 3, 4, 5, 6]
groups = df.groupby(['key'])
# Define a function to apply to each group
def mult_add(key, val, out1, out2):
for i in range(cuda.threadIdx.x, len(key), cuda.blockDim.x):
out1[i] = key[i] * val[i]
out2[i] = key[i] + val[i]
result = groups.apply_grouped(mult_add,
incols=['key', 'val'],
outcols={'out1': np.int32,
'out2': np.int32},
# threads per block
tpb=8)
print(result)
Output:
.. code-block:: python
key val out1 out2
0 0 0 0 0
1 0 1 0 1
2 1 2 2 3
3 1 3 3 4
4 2 4 8 6
5 2 5 10 7
6 2 6 12 8
.. code-block:: python
import cudf
import numpy as np
from numba import cuda
import pandas as pd
from random import randint
# Create a random 15 row dataframe with one categorical
# feature and one random integer valued feature
df = cudf.DataFrame(
{
"cat": [1] * 5 + [2] * 5 + [3] * 5,
"val": [randint(0, 100) for _ in range(15)],
}
)
# Group the dataframe by its categorical feature
groups = df.groupby("cat")
# Define a kernel which takes the moving average of a
# sliding window
def rolling_avg(val, avg):
win_size = 3
for i in range(cuda.threadIdx.x, len(val), cuda.blockDim.x):
if i < win_size - 1:
# If there is not enough data to fill the window,
# take the average to be NaN
avg[i] = np.nan
else:
total = 0
for j in range(i - win_size + 1, i + 1):
total += val[j]
avg[i] = total / win_size
# Compute moving averages on all groups
results = groups.apply_grouped(rolling_avg,
incols=['val'],
outcols=dict(avg=np.float64))
print("Results:", results)
# Note this gives the same result as its pandas equivalent
pdf = df.to_pandas()
pd_results = pdf.groupby('cat')['val'].rolling(3).mean()
Output:
.. code-block:: python
Results:
cat val avg
0 1 16
1 1 45
2 1 62 41.0
3 1 45 50.666666666666664
4 1 26 44.333333333333336
5 2 5
6 2 51
7 2 77 44.333333333333336
8 2 1 43.0
9 2 46 41.333333333333336
[5 more rows]
This is functionally equivalent to `pandas.DataFrame.Rolling
<https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.rolling.html>`_
"""
if not callable(function):
raise TypeError(f"type {type(function)} is not callable")
_, offsets, _, grouped_values = self._grouped()
kwargs.update({"chunks": offsets})
return grouped_values.apply_chunks(function, **kwargs)
@_cudf_nvtx_annotate
def _broadcast(self, values):
"""
Broadcast the results of an aggregation to the group
Parameters
----------
values: Series
A Series representing the results of an aggregation. The
index of the Series must be the (unique) values
representing the group keys.
Returns
-------
A Series of the same size and with the same index as
``self.obj``.
"""
if not values.index.equals(self.grouping.keys):
values = values._align_to_index(
self.grouping.keys, how="right", allow_non_unique=True
)
values.index = self.obj.index
return values
@_cudf_nvtx_annotate
def transform(self, function):
"""Apply an aggregation, then broadcast the result to the group size.
Parameters
----------
function: str or callable
Aggregation to apply to each group. Note that the set of
operations currently supported by `transform` is identical
to that supported by the `agg` method.
Returns
-------
A Series or DataFrame of the same size as the input, with the
result of the aggregation per group broadcasted to the group
size.
Examples
--------
.. code-block:: python
import cudf
df = cudf.DataFrame({'a': [2, 1, 1, 2, 2], 'b': [1, 2, 3, 4, 5]})
df.groupby('a').transform('max')
b
0 5
1 3
2 3
3 5
4 5
See Also
--------
agg
"""
try:
result = self.agg(function)
except TypeError as e:
raise NotImplementedError(
"Currently, `transform()` supports only aggregations."
) from e
return self._broadcast(result)
def rolling(self, *args, **kwargs):
"""
Returns a `RollingGroupby` object that enables rolling window
calculations on the groups.
See Also
--------
cudf.core.window.Rolling
"""
return cudf.core.window.rolling.RollingGroupby(self, *args, **kwargs)
@_cudf_nvtx_annotate
def count(self, dropna=True):
"""Compute the number of values in each column.
Parameters
----------
dropna : bool
If ``True``, don't include null values in the count.
"""
def func(x):
return getattr(x, "count")(dropna=dropna)
return self.agg(func)
@_cudf_nvtx_annotate
def describe(self, include=None, exclude=None):
"""
        Generate descriptive statistics that summarize the central tendency,
        dispersion and shape of a dataset's distribution, excluding NaN values.
        Analyzes numeric DataFrames only.
Parameters
----------
include: 'all', list-like of dtypes or None (default), optional
list of data types to include in the result.
Ignored for Series.
exclude: list-like of dtypes or None (default), optional,
list of data types to omit from the result.
Ignored for Series.
Returns
-------
Series or DataFrame
Summary statistics of the Dataframe provided.
Examples
--------
>>> import cudf
>>> gdf = cudf.DataFrame({
... "Speed": [380.0, 370.0, 24.0, 26.0],
... "Score": [50, 30, 90, 80],
... })
>>> gdf
Speed Score
0 380.0 50
1 370.0 30
2 24.0 90
3 26.0 80
>>> gdf.groupby('Score').describe()
Speed
count mean std min 25% 50% 75% max
Score
30 1 370.0 <NA> 370.0 370.0 370.0 370.0 370.0
50 1 380.0 <NA> 380.0 380.0 380.0 380.0 380.0
80 1 26.0 <NA> 26.0 26.0 26.0 26.0 26.0
90 1 24.0 <NA> 24.0 24.0 24.0 24.0 24.0
"""
if exclude is not None and include is not None:
raise NotImplementedError
res = self.agg(
[
"count",
"mean",
"std",
"min",
_quantile_25,
_quantile_50,
_quantile_75,
"max",
]
)
res.rename(
columns={
"_quantile_25": "25%",
"_quantile_50": "50%",
"_quantile_75": "75%",
},
level=1,
inplace=True,
)
return res
@_cudf_nvtx_annotate
def corr(self, method="pearson", min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method: {"pearson", "kendall", "spearman"} or callable,
default "pearson". Currently only the pearson correlation
coefficient is supported.
min_periods: int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
Correlation matrix.
Examples
--------
>>> import cudf
>>> gdf = cudf.DataFrame({
... "id": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
... "val1": [5, 4, 6, 4, 8, 7, 4, 5, 2],
... "val2": [4, 5, 6, 1, 2, 9, 8, 5, 1],
... "val3": [4, 5, 6, 1, 2, 9, 8, 5, 1]})
>>> gdf
id val1 val2 val3
0 a 5 4 4
1 a 4 5 5
2 a 6 6 6
3 b 4 1 1
4 b 8 2 2
5 b 7 9 9
6 c 4 8 8
7 c 5 5 5
8 c 2 1 1
>>> gdf.groupby("id").corr(method="pearson")
val1 val2 val3
id
a val1 1.000000 0.500000 0.500000
val2 0.500000 1.000000 1.000000
val3 0.500000 1.000000 1.000000
b val1 1.000000 0.385727 0.385727
val2 0.385727 1.000000 1.000000
val3 0.385727 1.000000 1.000000
c val1 1.000000 0.714575 0.714575
val2 0.714575 1.000000 1.000000
val3 0.714575 1.000000 1.000000
"""
if method.lower() not in ("pearson",):
raise NotImplementedError(
"Only pearson correlation is currently supported"
)
return self._cov_or_corr(
lambda x: x.corr(method, min_periods), "Correlation"
)
@_cudf_nvtx_annotate
def cov(self, min_periods=0, ddof=1):
"""
Compute the pairwise covariance among the columns of a DataFrame,
excluding NA/null values.
The returned DataFrame is the covariance matrix of the columns of
the DataFrame.
Both NA and null values are automatically excluded from the
calculation. See the note below about bias from missing values.
A threshold can be set for the minimum number of observations
for each value created. Comparisons with observations below this
threshold will be returned as `NA`.
This method is generally used for the analysis of time series data to
understand the relationship between different measures across time.
Parameters
----------
min_periods: int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof: int, optional
Delta degrees of freedom, default is 1.
Returns
-------
DataFrame
Covariance matrix.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data
(assuming that data is missing at random) the returned covariance
matrix will be an unbiased estimate of the variance and covariance
between the member Series.
However, for many applications this estimate may not be acceptable
because the estimate covariance matrix is not guaranteed to be
positive semi-definite. This could lead to estimate correlations
having absolute values which are greater than one, and/or a
non-invertible covariance matrix. See
`Estimation of covariance matrices
<https://en.wikipedia.org/wiki/Estimation_of_covariance_matrices>`
for more details.
Examples
--------
>>> import cudf
>>> gdf = cudf.DataFrame({
... "id": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
... "val1": [5, 4, 6, 4, 8, 7, 4, 5, 2],
... "val2": [4, 5, 6, 1, 2, 9, 8, 5, 1],
... "val3": [4, 5, 6, 1, 2, 9, 8, 5, 1],
... })
>>> gdf
id val1 val2 val3
0 a 5 4 4
1 a 4 5 5
2 a 6 6 6
3 b 4 1 1
4 b 8 2 2
5 b 7 9 9
6 c 4 8 8
7 c 5 5 5
8 c 2 1 1
>>> gdf.groupby("id").cov()
val1 val2 val3
id
a val1 1.000000 0.500000 0.500000
val2 0.500000 1.000000 1.000000
val3 0.500000 1.000000 1.000000
b val1 4.333333 3.500000 3.500000
val2 3.500000 19.000000 19.000000
val3 3.500000 19.000000 19.000000
c val1 2.333333 3.833333 3.833333
val2 3.833333 12.333333 12.333333
val3 3.833333 12.333333 12.333333
"""
return self._cov_or_corr(
lambda x: x.cov(min_periods, ddof), "Covariance"
)
def _cov_or_corr(self, func, method_name):
"""
Internal function that is called by either corr() or cov()
for sort groupby correlation and covariance computations,
respectively.
"""
        # Create an expanded dataframe consisting of all combinations of
        # the struct column-pairs to be used in the correlation or
        # covariance, i.e. (('col1', 'col1'), ('col1', 'col2'), ('col2', 'col2')).
column_names = self.grouping.values._column_names
num_cols = len(column_names)
column_pair_structs = {}
for x, y in itertools.combinations_with_replacement(column_names, 2):
# The number of output columns is the number of input columns
# squared. We directly call the struct column factory here to
# reduce overhead and avoid copying data. Since libcudf groupby
# maintains a cache of aggregation requests, reusing the same
# column also makes use of previously cached column means and
# reduces kernel costs.
            # Check whether the input column names are strings; if not,
            # raise a warning and cast them to strings.
if not (isinstance(x, str) and isinstance(y, str)):
warnings.warn(
"DataFrame contains non-string column name(s). "
"Struct columns require field names to be strings. "
"Non-string column names will be cast to strings "
"in the result's field names."
)
x, y = str(x), str(y)
column_pair_structs[(x, y)] = cudf.core.column.build_struct_column(
names=(x, y),
children=(self.obj._data[x], self.obj._data[y]),
size=len(self.obj),
)
column_pair_groupby = cudf.DataFrame._from_data(
column_pair_structs
).groupby(by=self.grouping.keys)
try:
gb_cov_corr = column_pair_groupby.agg(func)
except RuntimeError as e:
if "Unsupported groupby reduction type-agg combination" in str(e):
raise TypeError(
f"{method_name} accepts only numerical column-pairs"
)
raise
# ensure that column-pair labels are arranged in ascending order
cols_list = [
(y, x) if i > j else (x, y)
for j, y in enumerate(column_names)
for i, x in enumerate(column_names)
]
cols_split = [
cols_list[i : i + num_cols]
for i in range(0, len(cols_list), num_cols)
]
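        # Illustrative example: for column_names == ["a", "b"],
        # cols_list == [("a", "a"), ("a", "b"), ("a", "b"), ("b", "b")]
        # and cols_split == [[("a", "a"), ("a", "b")],
        # [("a", "b"), ("b", "b")]], one chunk per output column.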
# interleave: combines the correlation or covariance results for each
# column-pair into a single column
res = cudf.DataFrame._from_data(
{
x: interleave_columns([gb_cov_corr._data[y] for y in ys])
for ys, x in zip(cols_split, column_names)
}
)
# create a multiindex for the groupby covariance or correlation
# dataframe, to match pandas behavior
unsorted_idx = gb_cov_corr.index.repeat(num_cols)
idx_sort_order = unsorted_idx._get_sorted_inds()
sorted_idx = unsorted_idx._gather(idx_sort_order)
if len(gb_cov_corr):
# TO-DO: Should the operation below be done on the CPU instead?
sorted_idx._data[None] = as_column(
np.tile(column_names, len(gb_cov_corr.index))
)
res.index = MultiIndex._from_data(sorted_idx._data)
return res
@_cudf_nvtx_annotate
def var(self, ddof=1):
"""Compute the column-wise variance of the values in each group.
Parameters
----------
ddof : int
The delta degrees of freedom. N - ddof is the divisor used to
normalize the variance.
"""
def func(x):
return getattr(x, "var")(ddof=ddof)
return self.agg(func)
@_cudf_nvtx_annotate
def std(self, ddof=1):
"""Compute the column-wise std of the values in each group.
Parameters
----------
ddof : int
The delta degrees of freedom. N - ddof is the divisor used to
normalize the standard deviation.
"""
def func(x):
return getattr(x, "std")(ddof=ddof)
return self.agg(func)
@_cudf_nvtx_annotate
def quantile(self, q=0.5, interpolation="linear"):
"""Compute the column-wise quantiles of the values in each group.
Parameters
----------
q : float or array-like
The quantiles to compute.
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}
The interpolation method to use when the desired quantile lies
between two data points. Defaults to "linear".
"""
def func(x):
return getattr(x, "quantile")(q=q, interpolation=interpolation)
return self.agg(func)
@_cudf_nvtx_annotate
def collect(self):
"""Get a list of all the values for each column in each group."""
return self.agg("collect")
@_cudf_nvtx_annotate
def unique(self):
"""Get a list of the unique values for each column in each group."""
return self.agg("unique")
@_cudf_nvtx_annotate
def diff(self, periods=1, axis=0):
"""Get the difference between the values in each group.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference,
accepts negative values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
Only row-wise (0) shift is supported.
Returns
-------
Series or DataFrame
First differences of the Series or DataFrame.
"""
if not axis == 0:
raise NotImplementedError("Only axis=0 is supported.")
values = self.obj.__class__._from_data(
self.grouping.values._data, self.obj.index
)
return values - self.shift(periods=periods)
def _scan_fill(self, method: str, limit: int) -> DataFrameOrSeries:
"""Internal implementation for `ffill` and `bfill`"""
values = self.grouping.values
result = self.obj._from_columns(
self._groupby.replace_nulls([*values._columns], method),
values._column_names,
)
result = self._mimic_pandas_order(result)
return result._copy_type_metadata(values)
@_cudf_nvtx_annotate
def pad(self, limit=None):
"""Forward fill NA values.
.. deprecated:: 23.06
`pad` is deprecated, use `ffill` instead.
Parameters
----------
limit : int, default None
Unsupported
"""
if limit is not None:
raise NotImplementedError("Does not support limit param yet.")
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"pad is deprecated and will be removed in a future version. "
"Use ffill instead.",
FutureWarning,
)
return self._scan_fill("ffill", limit)
def ffill(self, limit=None):
"""Forward fill NA values.
Parameters
----------
limit : int, default None
Unsupported
"""
if limit is not None:
raise NotImplementedError("Does not support limit param yet.")
return self._scan_fill("ffill", limit)
@_cudf_nvtx_annotate
def backfill(self, limit=None):
"""Backward fill NA values.
.. deprecated:: 23.06
`backfill` is deprecated, use `bfill` instead.
Parameters
----------
limit : int, default None
Unsupported
"""
if limit is not None:
raise NotImplementedError("Does not support limit param yet.")
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"backfill is deprecated and will be removed in a future version. "
"Use bfill instead.",
FutureWarning,
)
return self._scan_fill("bfill", limit)
def bfill(self, limit=None):
"""Backward fill NA values.
Parameters
----------
limit : int, default None
Unsupported
"""
if limit is not None:
raise NotImplementedError("Does not support limit param yet.")
return self._scan_fill("bfill", limit)
@_cudf_nvtx_annotate
def fillna(
self,
value=None,
method=None,
axis=0,
inplace=False,
limit=None,
downcast=None,
):
"""Fill NA values using the specified method.
Parameters
----------
value : scalar, dict
Value to use to fill the holes. Cannot be specified with method.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
- pad/ffill: propagate last valid observation forward to next valid
- backfill/bfill: use next valid observation to fill gap
axis : {0 or 'index', 1 or 'columns'}
Unsupported
inplace : bool, default False
If `True`, fill inplace. Note: this will modify other views on this
object.
limit : int, default None
Unsupported
downcast : dict, default None
Unsupported
Returns
-------
DataFrame or Series
"""
if inplace:
raise NotImplementedError("Does not support inplace yet.")
if limit is not None:
raise NotImplementedError("Does not support limit param yet.")
if downcast is not None:
raise NotImplementedError("Does not support downcast yet.")
if not axis == 0:
raise NotImplementedError("Only support axis == 0.")
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
if value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
if method is not None:
if method not in {"pad", "ffill", "backfill", "bfill"}:
raise ValueError(
"Method can only be of 'pad', 'ffill',"
"'backfill', 'bfill'."
)
            return getattr(self, method)(limit)
values = self.obj.__class__._from_data(
self.grouping.values._data, self.obj.index
)
return values.fillna(
value=value, inplace=inplace, axis=axis, limit=limit
)
@_cudf_nvtx_annotate
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""
Shift each group by ``periods`` positions.
Parameters
----------
periods : int, default 1
Number of periods to shift.
freq : str, unsupported
axis : 0, axis to shift
Shift direction. Only row-wise shift is supported
fill_value : scalar or list of scalars, optional
The scalar value to use for newly introduced missing values. Can be
specified with `None`, a single value or multiple values:
- `None` (default): sets all indeterminable values to null.
- Single value: fill all shifted columns with this value. Should
match the data type of all columns.
- List of values: fill shifted columns with corresponding value in
the list. The length of the list should match the number of
columns shifted. Each value should match the data type of the
column to fill.
Returns
-------
Series or DataFrame
Object shifted within each group.
Notes
-----
Parameter ``freq`` is unsupported.
"""
if freq is not None:
raise NotImplementedError("Parameter freq is unsupported.")
if not axis == 0:
raise NotImplementedError("Only axis=0 is supported.")
values = self.grouping.values
if is_list_like(fill_value):
if len(fill_value) != len(values._data):
raise ValueError(
"Mismatched number of columns and values to fill."
)
else:
fill_value = [fill_value] * len(values._data)
result = self.obj.__class__._from_columns(
self._groupby.shift([*values._columns], periods, fill_value)[0],
values._column_names,
)
result = self._mimic_pandas_order(result)
return result._copy_type_metadata(values)
@_cudf_nvtx_annotate
def pct_change(
self, periods=1, fill_method="ffill", axis=0, limit=None, freq=None
):
"""
Calculates the percent change between sequential elements
in the group.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'ffill'
How to handle NAs before computing percent changes.
limit : int, optional
The number of consecutive NAs to fill before stopping.
Not yet implemented.
freq : str, optional
Increment to use from time series API.
Not yet implemented.
Returns
-------
Series or DataFrame
Percentage changes within each group
"""
if not axis == 0:
raise NotImplementedError("Only axis=0 is supported.")
if limit is not None:
raise NotImplementedError("limit parameter not supported yet.")
if freq is not None:
raise NotImplementedError("freq parameter not supported yet.")
elif fill_method not in {"ffill", "pad", "bfill", "backfill"}:
raise ValueError(
"fill_method must be one of 'ffill', 'pad', "
"'bfill', or 'backfill'."
)
if fill_method in ("pad", "backfill"):
alternative = "ffill" if fill_method == "pad" else "bfill"
# Do not remove until pandas 2.0 support is added.
warnings.warn(
f"{fill_method} is deprecated and will be removed in a future "
f"version. Use f{alternative} instead.",
FutureWarning,
)
filled = self.fillna(method=fill_method, limit=limit)
fill_grp = filled.groupby(self.grouping)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
def value_counts(
self,
subset=None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
dropna: bool = True,
) -> DataFrameOrSeries:
"""
Return a Series or DataFrame containing counts of unique rows.
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
dropna : bool, default True
Don't include counts of rows that contain NA values.
Returns
-------
Series or DataFrame
Series if the groupby as_index is True, otherwise DataFrame.
See Also
--------
Series.value_counts: Equivalent method on Series.
DataFrame.value_counts: Equivalent method on DataFrame.
SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.
Notes
-----
- If the groupby as_index is True then the returned Series will have a
MultiIndex with one level per input column.
- If the groupby as_index is False then the returned DataFrame will
have an additional column with the value_counts. The column is
labelled 'count' or 'proportion', depending on the ``normalize``
parameter.
By default, rows that contain any NA values are omitted from
the result.
By default, the result will be in descending order so that the
first element of each group is the most frequently-occurring row.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({
... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']
... })
>>> df
gender education country
0 male low US
1 male medium FR
2 female high US
3 male low FR
4 female high FR
5 male low FR
>>> df.groupby('gender').value_counts()
gender education country
female high FR 1
US 1
male low FR 2
US 1
medium FR 1
Name: count, dtype: int64
>>> df.groupby('gender').value_counts(ascending=True)
gender education country
female high FR 1
US 1
male low US 1
medium FR 1
low FR 2
Name: count, dtype: int64
>>> df.groupby('gender').value_counts(normalize=True)
gender education country
female high FR 0.50
US 0.50
male low FR 0.50
US 0.25
medium FR 0.25
Name: proportion, dtype: float64
>>> df.groupby('gender', as_index=False).value_counts()
gender education country count
0 female high FR 1
1 female high US 1
2 male low FR 2
3 male low US 1
4 male medium FR 1
>>> df.groupby('gender', as_index=False).value_counts(normalize=True)
gender education country proportion
0 female high FR 0.50
1 female high US 0.50
2 male low FR 0.50
3 male low US 0.25
4 male medium FR 0.25
"""
df = cudf.DataFrame.copy(self.obj)
groupings = self.grouping.names
name = "proportion" if normalize else "count"
if subset is None:
subset = [i for i in df._column_names if i not in groupings]
# Check subset exists in dataframe
elif set(subset) - set(df._column_names):
raise ValueError(
f"Keys {set(subset) - set(df._column_names)} in subset "
f"do not exist in the DataFrame."
)
# Catch case where groupby and subset share an element
elif set(subset) & set(groupings):
raise ValueError(
f"Keys {set(subset) & set(groupings)} in subset "
"cannot be in the groupby column keys."
)
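        # Count occurrences by grouping on the key columns plus the
        # subset columns and counting a constant placeholder column;
        # the placeholder itself never appears in the output.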
df["__placeholder"] = 1
result = (
df.groupby(groupings + list(subset), dropna=dropna)[
"__placeholder"
]
.count()
.sort_index()
.astype(np.int64)
)
if normalize:
levels = list(range(len(groupings), result.index.nlevels))
result /= result.groupby(
result.index.droplevel(levels),
).transform("sum")
if sort:
result = result.sort_values(ascending=ascending).sort_index(
level=range(len(groupings)), sort_remaining=False
)
if not self._as_index:
if name in df._column_names:
raise ValueError(
f"Column label '{name}' is duplicate of result column"
)
result.name = name
result = result.to_frame().reset_index()
else:
result.name = name
return result
def _mimic_pandas_order(
self, result: DataFrameOrSeries
) -> DataFrameOrSeries:
"""Given a groupby result from libcudf, reconstruct the row orders
matching that of pandas. This also adds appropriate indices.
"""
        # TODO: copying metadata after this method is a common pattern;
        # it should be merged into this method.
# This function is used to reorder the results of scan-based
# groupbys which have the same output size as input size.
# However, if the grouping key has NAs and dropna=True, the
        # result coming back from libcudf has null_count fewer rows than
# the input, so we must produce an ordering from the full
# input range.
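        # Illustrative example: for input rows [0, 1, 2, 3] with keys
        # ["b", "a", "b", "a"] and no nulls, libcudf returns results in
        # grouped order with ordering == [1, 3, 0, 2];
        # ordering.argsort() == [2, 0, 3, 1] restores the input order.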
_, (ordering,), _ = self._groupby.groups([arange(0, len(self.obj))])
if self._dropna and any(
c.has_nulls(include_nan=True) > 0
for c in self.grouping._key_columns
):
# Scan aggregations with null/nan keys put nulls in the
# corresponding output rows in pandas, to do that here
# expand the result by reindexing.
ri = cudf.RangeIndex(0, len(self.obj))
result.index = cudf.Index(ordering)
# This reorders and expands
result = result.reindex(ri)
else:
# Just reorder according to the groupings
result = result.take(ordering.argsort())
# Now produce the actual index we first thought of
result.index = self.obj.index
return result
class DataFrameGroupBy(GroupBy, GetAttrGetItemMixin):
obj: "cudf.core.dataframe.DataFrame"
_PROTECTED_KEYS = frozenset(("obj",))
def __getitem__(self, key):
return self.obj[key].groupby(
by=self.grouping.keys,
dropna=self._dropna,
sort=self._sort,
group_keys=self._group_keys,
as_index=self._as_index,
)
DataFrameGroupBy.__doc__ = groupby_doc_template.format(ret="")
class SeriesGroupBy(GroupBy):
obj: "cudf.core.series.Series"
def agg(self, func):
result = super().agg(func)
# downcast the result to a Series:
if len(result._data):
if result.shape[1] == 1 and not is_list_like(func):
return result.iloc[:, 0]
# drop the first level if we have a multiindex
if result._data.nlevels > 1:
result.columns = result._data.to_pandas_index().droplevel(0)
return result
def apply(self, func, *args):
result = super().apply(func, *args)
# apply Series name to result
result.name = self.obj.name
return result
SeriesGroupBy.__doc__ = groupby_doc_template.format(ret="")
# TODO: should we define this as a dataclass instead?
class Grouper:
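    # Rough usage sketch (illustrative): df.groupby(cudf.Grouper(key="a"))
    # groups by column "a" and cudf.Grouper(level=0) groups by the first
    # index level; frequency-based grouping (freq=...) is not implemented.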
def __init__(
self, key=None, level=None, freq=None, closed=None, label=None
):
if key is not None and level is not None:
raise ValueError("Grouper cannot specify both key and level")
if (key, level) == (None, None) and not freq:
raise ValueError("Grouper must specify either key or level")
self.key = key
self.level = level
self.freq = freq
self.closed = closed
self.label = label
class _Grouping(Serializable):
def __init__(self, obj, by=None, level=None):
self._obj = obj
self._key_columns = []
self.names = []
# Need to keep track of named key columns
# to support `as_index=False` correctly
self._named_columns = []
self._handle_by_or_level(by, level)
if len(obj) and not len(self._key_columns):
raise ValueError("No group keys passed")
def _handle_by_or_level(self, by=None, level=None):
if level is not None:
if by is not None:
raise ValueError("Cannot specify both by and level")
level_list = level if isinstance(level, list) else [level]
for level in level_list:
self._handle_level(level)
else:
by_list = by if isinstance(by, list) else [by]
for by in by_list:
if callable(by):
self._handle_callable(by)
elif isinstance(by, cudf.Series):
self._handle_series(by)
elif isinstance(by, cudf.BaseIndex):
self._handle_index(by)
elif isinstance(by, abc.Mapping):
self._handle_mapping(by)
elif isinstance(by, Grouper):
self._handle_grouper(by)
elif isinstance(by, pd.Series):
self._handle_series(cudf.Series.from_pandas(by))
elif isinstance(by, pd.Index):
self._handle_index(cudf.Index.from_pandas(by))
else:
try:
self._handle_label(by)
except (KeyError, TypeError):
self._handle_misc(by)
@property
def keys(self):
"""Return grouping key columns as index"""
nkeys = len(self._key_columns)
if nkeys == 0:
return cudf.core.index.as_index([], name=None)
elif nkeys > 1:
return cudf.MultiIndex._from_data(
dict(zip(range(nkeys), self._key_columns))
)._set_names(self.names)
else:
return cudf.core.index.as_index(
self._key_columns[0], name=self.names[0]
)
@property
def values(self) -> cudf.core.frame.Frame:
"""Return value columns as a frame.
        Note that in aggregation, value columns can be arbitrarily
        specified, while this method returns all non-key columns from `obj`
        as a frame.
This is mainly used in transform-like operations.
"""
# If the key columns are in `obj`, filter them out
value_column_names = [
x for x in self._obj._data.names if x not in self._named_columns
]
value_columns = self._obj._data.select_by_label(value_column_names)
return self._obj.__class__._from_data(value_columns)
def _handle_callable(self, by):
by = by(self._obj.index)
self.__init__(self._obj, by)
def _handle_series(self, by):
by = by._align_to_index(self._obj.index, how="right")
self._key_columns.append(by._column)
self.names.append(by.name)
def _handle_index(self, by):
self._key_columns.extend(by._data.columns)
self.names.extend(by._data.names)
def _handle_mapping(self, by):
by = cudf.Series(by.values(), index=by.keys())
self._handle_series(by)
def _handle_label(self, by):
try:
self._key_columns.append(self._obj._data[by])
except KeyError as e:
# `by` can be index name(label) too.
if by in self._obj._index.names:
self._key_columns.append(self._obj._index._data[by])
else:
raise e
self.names.append(by)
self._named_columns.append(by)
def _handle_grouper(self, by):
if by.freq:
self._handle_frequency_grouper(by)
elif by.key:
self._handle_label(by.key)
else:
self._handle_level(by.level)
def _handle_frequency_grouper(self, by):
raise NotImplementedError()
def _handle_level(self, by):
level_values = self._obj.index.get_level_values(by)
self._key_columns.append(level_values._values)
self.names.append(level_values.name)
def _handle_misc(self, by):
by = cudf.core.column.as_column(by)
if len(by) != len(self._obj):
raise ValueError("Grouper and object must have same length")
self._key_columns.append(by)
self.names.append(None)
def serialize(self):
header = {}
frames = []
header["names"] = pickle.dumps(self.names)
header["_named_columns"] = pickle.dumps(self._named_columns)
column_header, column_frames = cudf.core.column.serialize_columns(
self._key_columns
)
header["columns"] = column_header
frames.extend(column_frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
names = pickle.loads(header["names"])
_named_columns = pickle.loads(header["_named_columns"])
key_columns = cudf.core.column.deserialize_columns(
header["columns"], frames
)
out = _Grouping.__new__(_Grouping)
out.names = names
out._named_columns = _named_columns
out._key_columns = key_columns
return out
def copy(self, deep=True):
out = _Grouping.__new__(_Grouping)
out.names = copy.deepcopy(self.names)
out._named_columns = copy.deepcopy(self._named_columns)
out._key_columns = [col.copy(deep=deep) for col in self._key_columns]
return out
def _is_multi_agg(aggs):
"""
Returns True if more than one aggregation is performed
on any of the columns as specified in `aggs`.
"""
if isinstance(aggs, abc.Mapping):
return any(is_list_like(agg) for agg in aggs.values())
if is_list_like(aggs):
return True
return False
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/groupby/__init__.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from cudf.core.groupby.groupby import GroupBy, Grouper
__all__ = [
"GroupBy",
"Grouper",
]
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/join/join.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from __future__ import annotations
import itertools
import warnings
from typing import Any, ClassVar, List, Optional
import cudf
from cudf import _lib as libcudf
from cudf._lib.types import size_type_dtype
from cudf.core.copy_types import GatherMap
from cudf.core.join._join_helpers import (
_coerce_to_tuple,
_ColumnIndexer,
_IndexIndexer,
_match_join_keys,
)
class Merge:
# The joiner function must have the following signature:
#
# def joiner(
# lhs: Frame,
# rhs: Frame
# ) -> Tuple[Optional[Column], Optional[Column]]:
# ...
#
# where `lhs` and `rhs` are Frames composed of the left and right
# join key. The `joiner` returns a tuple of two Columns
# representing the rows to gather from the left- and right- side
# tables respectively.
_joiner: ClassVar[staticmethod] = staticmethod(libcudf.join.join)
def __init__(
self,
lhs,
rhs,
*,
on,
left_on,
right_on,
left_index,
right_index,
how,
sort,
indicator,
suffixes,
):
"""
Manage the merging of two Frames.
Parameters
----------
lhs : DataFrame
The left operand of the merge
rhs : DataFrame
The right operand of the merge
on : string or list like
A set of key columns in the left and right operands
elements must be common to both frames
left_on : string or list like
A set of key columns in the left operand. Must be
specified with right_on or right_index concurrently
right_on : string or list like
A set of key columns in the right operand. Must be
specified with left_on or left_index concurrently
left_index : bool
Boolean flag indicating the left index column or columns
are to be used as join keys in order.
right_index : bool
Boolean flag indicating the right index column or columns
are to be used as join keys in order.
how : string
The type of join. Possible values are
'inner', 'outer', 'left', 'leftsemi' and 'leftanti'
sort : bool
Boolean flag indicating if the output Frame is to be
sorted on the output's join keys, in left to right order.
suffixes : list like
Left and right suffixes specified together, unpacked into lsuffix
and rsuffix.
"""
self._validate_merge_params(
lhs,
rhs,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
how=how,
suffixes=suffixes,
)
self.lhs = lhs.copy(deep=False)
self.rhs = rhs.copy(deep=False)
self.how = how
# If the user requests that the result is sorted or we're in
# pandas-compatible mode we have various obligations on the
# output order:
#
# compat-> | False | True
# sort | |
# ---------+--------------------------+-------------------------------
# False| no obligation | ordering as per pandas docs(*)
# True | sorted lexicographically | sorted lexicographically(*)
#
# (*) If two keys are equal, tiebreak is to use input table order.
#
# In pandas-compat mode, we have obligations on the order to
# match pandas (even if sort=False), see
# pandas.pydata.org/docs/reference/api/pandas.DataFrame.merge.html.
# The ordering requirements differ depending on which join
# type is specified:
#
# - left: preserve key order (only keeping left keys)
# - right: preserve key order (only keeping right keys)
# - inner: preserve key order (of left keys)
# - outer: sort keys lexicographically
# - cross (not supported): preserve key order (of left keys)
#
# Moreover, in all cases, whenever there is a tiebreak
# situation (for sorting or otherwise), the deciding order is
# "input table order"
self.sort = sort or (
cudf.get_option("mode.pandas_compatible") and how == "outer"
)
self.preserve_key_order = cudf.get_option(
"mode.pandas_compatible"
) and how in {
"inner",
"outer",
"left",
"right",
}
self.lsuffix, self.rsuffix = suffixes
# At this point validation guarantees that if on is not None we
# don't have any other args, so we can apply it directly to left_on and
# right_on.
self._using_left_index = bool(left_index)
left_on = (
lhs.index._data.names if left_index else left_on if left_on else on
)
self._using_right_index = bool(right_index)
right_on = (
rhs.index._data.names
if right_index
else right_on
if right_on
else on
)
if left_on or right_on:
self._left_keys = [
_ColumnIndexer(name=on)
if not self._using_left_index and on in lhs._data
else _IndexIndexer(name=on)
for on in (_coerce_to_tuple(left_on) if left_on else [])
]
self._right_keys = [
_ColumnIndexer(name=on)
if not self._using_right_index and on in rhs._data
else _IndexIndexer(name=on)
for on in (_coerce_to_tuple(right_on) if right_on else [])
]
if len(self._left_keys) != len(self._right_keys):
raise ValueError(
"Merge operands must have same number of join key columns"
)
self._using_left_index = any(
isinstance(idx, _IndexIndexer) for idx in self._left_keys
)
self._using_right_index = any(
isinstance(idx, _IndexIndexer) for idx in self._right_keys
)
else:
# if `on` is not provided and we're not merging
# index with column or on both indexes, then use
# the intersection of columns in both frames
on_names = set(lhs._data) & set(rhs._data)
self._left_keys = [_ColumnIndexer(name=on) for on in on_names]
self._right_keys = [_ColumnIndexer(name=on) for on in on_names]
self._using_left_index = False
self._using_right_index = False
self._key_columns_with_same_name = (
set(_coerce_to_tuple(on))
if on
else {
lkey.name
for lkey, rkey in zip(self._left_keys, self._right_keys)
if lkey.name == rkey.name
and not (
isinstance(lkey, _IndexIndexer)
or isinstance(rkey, _IndexIndexer)
)
}
)
def _gather_maps(self, left_cols, right_cols):
# Produce gather maps for the join, optionally reordering to
# match pandas-order in compat mode.
maps = self._joiner(
left_cols,
right_cols,
how=self.how,
)
if not self.preserve_key_order:
return maps
# We should only get here if we're in a join on which
# pandas-compat places some ordering obligation (which
# precludes a semi-join)
# We must perform this reordering even if sort=True since the
# obligation to ensure tiebreaks appear in input table order
# means that the gather maps must be permuted into an original
# order.
assert self.how in {"inner", "outer", "left", "right"}
# And hence both maps returned from the libcudf join should be
# non-None.
assert all(m is not None for m in maps)
lengths = [len(left_cols[0]), len(right_cols[0])]
# Only nullify those maps that need it.
nullify = [
self.how not in {"inner", "left"},
self.how not in {"inner", "right"},
]
# To reorder maps so that they are in order of the input
# tables, we gather from iota on both right and left, and then
# sort the gather maps with those two columns as key.
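        # Illustrative example: an inner join whose left gather map is
        # [2, 0] and right gather map is [1, 1] produces key_order
        # columns [2, 0] and [1, 1]; stable-sorting both maps by those
        # keys reorders them to [0, 2] and [1, 1], i.e. left-table order.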
key_order = list(
itertools.chain.from_iterable(
libcudf.copying.gather(
[cudf.core.column.arange(n, dtype=size_type_dtype)],
map_,
nullify=null,
)
for map_, n, null in zip(maps, lengths, nullify)
)
)
return libcudf.sort.sort_by_key(
list(maps),
# If how is right, right map is primary sort key.
key_order[:: -1 if self.how == "right" else 1],
[True] * len(key_order),
["last"] * len(key_order),
stable=True,
)
def perform_merge(self) -> cudf.DataFrame:
left_join_cols = []
right_join_cols = []
for left_key, right_key in zip(self._left_keys, self._right_keys):
lcol = left_key.get(self.lhs)
rcol = right_key.get(self.rhs)
lcol_casted, rcol_casted = _match_join_keys(lcol, rcol, self.how)
left_join_cols.append(lcol_casted)
right_join_cols.append(rcol_casted)
# Categorical dtypes must be cast back from the underlying codes
# type that was returned by _match_join_keys.
if (
self.how == "inner"
and isinstance(lcol.dtype, cudf.CategoricalDtype)
and isinstance(rcol.dtype, cudf.CategoricalDtype)
):
lcol_casted = lcol_casted.astype("category")
rcol_casted = rcol_casted.astype("category")
left_key.set(self.lhs, lcol_casted, validate=False)
right_key.set(self.rhs, rcol_casted, validate=False)
left_rows, right_rows = self._gather_maps(
left_join_cols, right_join_cols
)
gather_kwargs = {
"keep_index": self._using_left_index or self._using_right_index,
}
left_result = (
self.lhs._gather(
GatherMap.from_column_unchecked(
left_rows, len(self.lhs), nullify=True
),
**gather_kwargs,
)
if left_rows is not None
else cudf.DataFrame._from_data({})
)
del left_rows
right_result = (
self.rhs._gather(
GatherMap.from_column_unchecked(
right_rows, len(self.rhs), nullify=True
),
**gather_kwargs,
)
if right_rows is not None
else cudf.DataFrame._from_data({})
)
del right_rows
result = cudf.DataFrame._from_data(
*self._merge_results(left_result, right_result)
)
if self.sort:
result = self._sort_result(result)
return result
def _merge_results(
self, left_result: cudf.DataFrame, right_result: cudf.DataFrame
):
# Merge the DataFrames `left_result` and `right_result` into a single
# `DataFrame`, suffixing column names if necessary.
# If two key columns have the same name, a single output column appears
# in the result. For all non-outer join types, the key column from the
# rhs is simply dropped. For outer joins, the two key columns are
# combined by filling nulls in the left key column with corresponding
# values from the right key column:
if self.how == "outer":
for lkey, rkey in zip(self._left_keys, self._right_keys):
if lkey.name == rkey.name:
# fill nulls in lhs from values in the rhs
lkey.set(
left_result,
lkey.get(left_result).fillna(rkey.get(right_result)),
validate=False,
)
# All columns from the left table make it into the output. Non-key
# columns that share a name with a column in the right table are
# suffixed with the provided suffix.
common_names = set(left_result._data.names) & set(
right_result._data.names
)
cols_to_suffix = common_names - self._key_columns_with_same_name
data = {
(f"{name}{self.lsuffix}" if name in cols_to_suffix else name): col
for name, col in left_result._data.items()
}
# The right table follows the same rule as the left table except that
# key columns from the right table are removed.
for name, col in right_result._data.items():
if name in common_names:
if name not in self._key_columns_with_same_name:
data[f"{name}{self.rsuffix}"] = col
else:
data[name] = col
# determine if the result has multiindex columns. The result
# of a join has a MultiIndex as its columns if:
# - both the `lhs` and `rhs` have a MultiIndex columns
# OR
# - either one of `lhs` or `rhs` have a MultiIndex columns,
# and the other is empty (i.e., no columns)
if self.lhs._data and self.rhs._data:
multiindex_columns = (
self.lhs._data.multiindex and self.rhs._data.multiindex
)
elif self.lhs._data:
multiindex_columns = self.lhs._data.multiindex
elif self.rhs._data:
multiindex_columns = self.rhs._data.multiindex
else:
multiindex_columns = False
index: Optional[cudf.BaseIndex]
if self._using_right_index:
# right_index and left_on
index = left_result._index
elif self._using_left_index:
# left_index and right_on
index = right_result._index
else:
index = None
# Construct result from data and index:
return (
left_result._data.__class__(
data=data, multiindex=multiindex_columns
),
index,
)
def _sort_result(self, result: cudf.DataFrame) -> cudf.DataFrame:
# Pandas sorts on the key columns in the
# same order as given in 'on'. If the indices are used as
# keys, the index will be sorted. If one index is specified,
# the key columns on the other side will be used to sort.
# In pandas-compatible mode, tie-breaking for multiple equal
# sort keys is to produce output in input dataframe order.
# This is taken care of by using a stable sort here, and (in
# pandas-compat mode) reordering the gather maps before
# producing the input result.
by: List[Any] = []
if self._using_left_index and self._using_right_index:
by.extend(result._index._data.columns)
if not self._using_left_index:
by.extend([result._data[col.name] for col in self._left_keys])
if not self._using_right_index:
by.extend([result._data[col.name] for col in self._right_keys])
if by:
keep_index = self._using_left_index or self._using_right_index
if keep_index:
to_sort = [*result._index._columns, *result._columns]
index_names = result._index.names
else:
to_sort = [*result._columns]
index_names = None
result_columns = libcudf.sort.sort_by_key(
to_sort,
by,
[True] * len(by),
["last"] * len(by),
stable=True,
)
result = result._from_columns_like_self(
result_columns, result._column_names, index_names
)
return result
@staticmethod
def _validate_merge_params(
lhs,
rhs,
on,
left_on,
right_on,
left_index,
right_index,
how,
suffixes,
):
# Error for various invalid combinations of merge input parameters
# We must actually support the requested merge type
if how not in {"left", "inner", "outer", "leftanti", "leftsemi"}:
raise NotImplementedError(f"{how} merge not supported yet")
if on:
if left_on or right_on:
# Passing 'on' with 'left_on' or 'right_on' is ambiguous
raise ValueError(
'Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.'
)
elif left_index or right_index:
# Passing 'on' with 'left_index' or 'right_index' is ambiguous
raise ValueError(
'Can only pass argument "on" OR "left_index" '
'and "right_index", not a combination of both.'
)
else:
                # the validity of 'on' is checked by _Indexer
return
elif left_on and left_index:
raise ValueError(
'Can only pass argument "left_on" OR "left_index" not both.'
)
elif right_on and right_index:
raise ValueError(
'Can only pass argument "right_on" OR "right_index" not both.'
)
# Can't merge on a column name that is present in both a frame and its
# indexes.
if on:
for key in on:
if (key in lhs._data and key in lhs.index._data) or (
key in rhs._data and key in rhs.index._data
):
raise ValueError(
f"{key} is both an index level and a "
"column label, which is ambiguous."
)
if left_on:
for key in left_on:
if key in lhs._data and key in lhs.index._data:
raise ValueError(
f"{key} is both an index level and a "
"column label, which is ambiguous."
)
if right_on:
for key in right_on:
if key in rhs._data and key in rhs.index._data:
raise ValueError(
f"{key} is both an index level and a "
"column label, which is ambiguous."
)
# Can't merge on unnamed Series
if (isinstance(lhs, cudf.Series) and not lhs.name) or (
isinstance(rhs, cudf.Series) and not rhs.name
):
raise ValueError("Cannot merge on unnamed Series")
# If nothing specified, must have common cols to use implicitly
same_named_columns = set(lhs._data) & set(rhs._data)
if (
not (left_index or right_index)
and not (left_on or right_on)
and len(same_named_columns) == 0
):
raise ValueError("No common columns to perform merge on")
lsuffix, rsuffix = suffixes
for name in same_named_columns:
if name == left_on == right_on:
continue
elif left_on and right_on:
if (name in left_on and name in right_on) and (
left_on.index(name) == right_on.index(name)
):
continue
else:
if not (lsuffix or rsuffix):
raise ValueError(
"there are overlapping columns but "
"lsuffix and rsuffix are not defined"
)
if (
isinstance(lhs, cudf.DataFrame)
and isinstance(rhs, cudf.DataFrame)
# An empty column is considered to have 1 level by pandas (can be
# seen by using lhs.columns.nlevels, but we don't want to use
# columns internally because it's expensive).
# TODO: Investigate whether ColumnAccessor.nlevels should be
# modified in the size 0 case.
and max(lhs._data.nlevels, 1) != max(rhs._data.nlevels, 1)
):
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"merging between different levels is deprecated and will be "
f"removed in a future version. ({lhs._data.nlevels} levels on "
f"the left, {rhs._data.nlevels} on the right)",
FutureWarning,
)
class MergeSemi(Merge):
_joiner: ClassVar[staticmethod] = staticmethod(libcudf.join.semi_join)
def _merge_results(self, lhs: cudf.DataFrame, rhs: cudf.DataFrame):
# semi-join result includes only lhs columns
return lhs._data, lhs._index
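# A minimal, hedged sketch of how the parameter checks implemented in
# Merge._validate_merge_params surface through the public DataFrame.merge API.
# The frames and the helper name below are hypothetical illustrations, not part
# of the merge machinery itself.
def _example_merge_validation():
    import cudf

    left = cudf.DataFrame({"key": [1, 2, 3], "a": [10, 20, 30]})
    right = cudf.DataFrame({"key": [2, 3, 4], "b": [200, 300, 400]})

    # Passing 'on' together with 'left_on'/'right_on' is ambiguous and rejected.
    try:
        left.merge(right, on="key", left_on="key")
    except ValueError:
        pass

    # With no keys given, the frames must share at least one column name.
    try:
        left[["a"]].merge(right[["b"]])
    except ValueError:
        pass

    # A plain inner merge on the shared column name is valid.
    return left.merge(right, on="key", how="inner")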
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/join/_join_helpers.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
from __future__ import annotations
from collections import abc
from typing import TYPE_CHECKING, Any, Tuple, cast
import numpy as np
import cudf
from cudf.api.types import is_decimal_dtype, is_dtype_equal
from cudf.core.column import CategoricalColumn
from cudf.core.dtypes import CategoricalDtype
if TYPE_CHECKING:
from cudf.core.column import ColumnBase
class _Indexer:
# Indexer into a column (either a data column or index level).
#
# >>> df
# a
# b
# 4 1
# 5 2
# 6 3
# >>> _ColumnIndexer("a").get(df)  # returns column "a" of df
# >>> _IndexIndexer("b").get(df)  # returns index level "b" of df
def __init__(self, name: Any):
self.name = name
class _ColumnIndexer(_Indexer):
def get(self, obj: cudf.DataFrame) -> ColumnBase:
return obj._data[self.name]
def set(self, obj: cudf.DataFrame, value: ColumnBase, validate=False):
obj._data.set_by_label(self.name, value, validate=validate)
class _IndexIndexer(_Indexer):
def get(self, obj: cudf.DataFrame) -> ColumnBase:
return obj._index._data[self.name]
def set(self, obj: cudf.DataFrame, value: ColumnBase, validate=False):
obj._index._data.set_by_label(self.name, value, validate=validate)
def _match_join_keys(
lcol: ColumnBase, rcol: ColumnBase, how: str
) -> Tuple[ColumnBase, ColumnBase]:
# Casts lcol and rcol to a common dtype for use as join keys. If no casting
# is necessary, they are returned as is.
common_type = None
# cast the keys lcol and rcol to a common dtype
ltype = lcol.dtype
rtype = rcol.dtype
# if either side is categorical, different logic
left_is_categorical = isinstance(ltype, CategoricalDtype)
right_is_categorical = isinstance(rtype, CategoricalDtype)
if left_is_categorical and right_is_categorical:
return _match_categorical_dtypes_both(
cast(CategoricalColumn, lcol), cast(CategoricalColumn, rcol), how
)
elif left_is_categorical or right_is_categorical:
if left_is_categorical:
if how in {"left", "leftsemi", "leftanti"}:
return lcol, rcol.astype(ltype)
common_type = ltype.categories.dtype
else:
common_type = rtype.categories.dtype
common_type = cudf.utils.dtypes._dtype_pandas_compatible(common_type)
return lcol.astype(common_type), rcol.astype(common_type)
if is_dtype_equal(ltype, rtype):
return lcol, rcol
if is_decimal_dtype(ltype) or is_decimal_dtype(rtype):
raise TypeError(
"Decimal columns can only be merged with decimal columns "
"of the same precision and scale"
)
if (
np.issubdtype(ltype, np.number)
and np.issubdtype(rtype, np.number)
and not (
np.issubdtype(ltype, np.timedelta64)
or np.issubdtype(rtype, np.timedelta64)
)
):
common_type = (
max(ltype, rtype)
if ltype.kind == rtype.kind
else np.find_common_type([], (ltype, rtype))
)
elif (
np.issubdtype(ltype, np.datetime64)
and np.issubdtype(rtype, np.datetime64)
) or (
np.issubdtype(ltype, np.timedelta64)
and np.issubdtype(rtype, np.timedelta64)
):
common_type = max(ltype, rtype)
elif (
np.issubdtype(ltype, np.datetime64)
or np.issubdtype(ltype, np.timedelta64)
) and not rcol.fillna(0).can_cast_safely(ltype):
raise TypeError(
f"Cannot join between {ltype} and {rtype}, please type-cast both "
"columns to the same type."
)
elif (
np.issubdtype(rtype, np.datetime64)
or np.issubdtype(rtype, np.timedelta64)
) and not lcol.fillna(0).can_cast_safely(rtype):
raise TypeError(
f"Cannot join between {rtype} and {ltype}, please type-cast both "
"columns to the same type."
)
if how == "left" and rcol.fillna(0).can_cast_safely(ltype):
return lcol, rcol.astype(ltype)
return lcol.astype(common_type), rcol.astype(common_type)
def _match_categorical_dtypes_both(
lcol: CategoricalColumn, rcol: CategoricalColumn, how: str
) -> Tuple[ColumnBase, ColumnBase]:
ltype, rtype = lcol.dtype, rcol.dtype
# when both are ordered and both have the same categories,
# no casting required:
if ltype == rtype:
return lcol, rcol
# Merging categorical variables when only one side is ordered is
# ambiguous and not allowed.
if ltype.ordered != rtype.ordered:
raise TypeError(
"Merging on categorical variables with mismatched"
" ordering is ambiguous"
)
if ltype.ordered and rtype.ordered:
# if we get to here, categories must be what causes the
# dtype equality check to fail. And we can never merge
# two ordered categoricals with different categories
raise TypeError(
f"{how} merge between categoricals with "
"different categories is only valid when "
"neither side is ordered"
)
if how == "inner":
# cast to category types -- we must cast them back later
return _match_join_keys(
lcol._get_decategorized_column(),
rcol._get_decategorized_column(),
how,
)
elif how in {"left", "leftanti", "leftsemi"}:
# always cast to left type
return lcol, rcol.astype(ltype)
else:
# merge categories
merged_categories = cudf.concat(
[ltype.categories, rtype.categories]
).unique()
common_type = cudf.CategoricalDtype(
categories=merged_categories, ordered=False
)
return lcol.astype(common_type), rcol.astype(common_type)
def _coerce_to_tuple(obj):
if isinstance(obj, abc.Iterable) and not isinstance(obj, str):
return tuple(obj)
else:
return (obj,)
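# A minimal, hedged sketch of the dtype-resolution rules encoded in
# _match_join_keys above, expressed with plain numpy. The helper name and the
# dtypes chosen here are hypothetical examples.
def _example_join_key_promotion():
    import numpy as np

    # Same numeric kind: the wider dtype wins (int32 vs int64 -> int64).
    same_kind = max(np.dtype("int32"), np.dtype("int64"))

    # Different numeric kinds: fall back to numpy's common-type resolution
    # (int64 vs float32 -> float64), mirroring the np.find_common_type call
    # used above.
    mixed_kind = np.find_common_type(
        [], (np.dtype("int64"), np.dtype("float32"))
    )

    return same_kind, mixed_kind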
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/join/__init__.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
from cudf.core.join.join import Merge, MergeSemi
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/mixins/binops.pyi
|
# Copyright (c) 2022, NVIDIA CORPORATION.
from typing import Any, Set, Tuple, TypeVar
# Note: It may be possible to define a narrower bound here eventually.
BinaryOperandType = TypeVar("BinaryOperandType", bound="Any")
class BinaryOperand:
_SUPPORTED_BINARY_OPERATIONS: Set
def _binaryop(self, other: BinaryOperandType, op: str): ...
def __add__(self, other): ...
def __sub__(self, other): ...
def __mul__(self, other): ...
def __truediv__(self, other): ...
def __floordiv__(self, other): ...
def __mod__(self, other): ...
def __pow__(self, other): ...
def __and__(self, other): ...
def __xor__(self, other): ...
def __or__(self, other): ...
def __radd__(self, other): ...
def __rsub__(self, other): ...
def __rmul__(self, other): ...
def __rtruediv__(self, other): ...
def __rfloordiv__(self, other): ...
def __rmod__(self, other): ...
def __rpow__(self, other): ...
def __rand__(self, other): ...
def __rxor__(self, other): ...
def __ror__(self, other): ...
def __lt__(self, other): ...
def __le__(self, other): ...
def __eq__(self, other): ...
def __ne__(self, other): ...
def __gt__(self, other): ...
def __ge__(self, other): ...
@staticmethod
def _check_reflected_op(op) -> Tuple[bool, str]: ...
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/mixins/reductions.py
|
# Copyright (c) 2022, NVIDIA CORPORATION.
from .mixin_factory import _create_delegating_mixin
Reducible = _create_delegating_mixin(
"Reducible",
"Mixin encapsulating reduction operations.",
"REDUCTION",
"_reduce",
{
"sum",
"product",
"min",
"max",
"count",
"any",
"all",
"sum_of_squares",
"mean",
"var",
"std",
"median",
"argmax",
"argmin",
"nunique",
"nth",
"collect",
"unique",
"prod",
"idxmin",
"idxmax",
"first",
"last",
},
)
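# A minimal, hedged sketch of how a class opts into the reduction methods
# generated by this mixin; it follows the pattern documented in
# mixin_factory._create_delegating_mixin. The class below is a hypothetical
# illustration, not part of cudf.
class _ExampleReducer(Reducible):
    _VALID_REDUCTIONS = {"sum", "max"}

    def __init__(self, values):
        self._values = list(values)

    def _reduce(self, op: str, *args, **kwargs):
        """Compute the {op} of the wrapped values."""
        return {"sum": sum, "max": max}[op](self._values)


# Usage: _ExampleReducer([1, 2, 3]).sum() returns 6 and .max() returns 3; both
# methods are generated automatically from _VALID_REDUCTIONS.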
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/mixins/scans.pyi
|
# Copyright (c) 2022, NVIDIA CORPORATION.
from typing import Set
class Scannable:
_SUPPORTED_SCANS: Set
def cumsum(self): ...
def cumprod(self): ...
def cummin(self): ...
def cummax(self): ...
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/mixins/reductions.pyi
|
# Copyright (c) 2022, NVIDIA CORPORATION.
from typing import Set
class Reducible:
_SUPPORTED_REDUCTIONS: Set
def sum(self): ...
def product(self): ...
def min(self): ...
def max(self): ...
def count(self): ...
def any(self): ...
def all(self): ...
def sum_of_squares(self): ...
def mean(self): ...
def var(self): ...
def std(self): ...
def median(self): ...
def argmax(self): ...
def argmin(self): ...
def nunique(self): ...
def nth(self): ...
def collect(self): ...
def prod(self): ...
def idxmin(self): ...
def idxmax(self): ...
def first(self): ...
def last(self): ...
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/mixins/scans.py
|
# Copyright (c) 2022, NVIDIA CORPORATION.
from .mixin_factory import _create_delegating_mixin
Scannable = _create_delegating_mixin(
"Scannable",
"Mixin encapsulating scan operations.",
"SCAN",
"_scan",
{
"cumsum",
"cumprod",
"cummin",
"cummax",
}, # noqa: E231
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/mixins/mixin_factory.py
|
# Copyright (c) 2022, NVIDIA CORPORATION.
import inspect
# `functools.partialmethod` does not allow setting attributes such as
# __doc__ on the resulting method. So we use a simple alternative to
# it here:
def _partialmethod(method, *args1, **kwargs1):
def wrapper(self, *args2, **kwargs2):
return method(self, *args1, *args2, **kwargs1, **kwargs2)
return wrapper
class Operation:
"""Descriptor used to define operations for delegating mixins.
This class is designed to be assigned to the attributes (the delegating
methods) defined by the OperationMixin. This class will create the method
and mimic all the expected attributes for that method to appear as though
it was originally designed on the class. The use of the descriptor pattern
ensures that the method is only created the first time it is invoked, after
which all further calls use the callable generated on the first invocation.
Parameters
----------
name : str
The name of the operation.
docstring_format_args : str
The attribute of the owning class from which to pull format parameters
for this operation's docstring.
base_operation : str
The underlying operation function to be invoked when operation `name`
is called on the owning class.
"""
def __init__(self, name, docstring_format_args, base_operation):
self._name = name
self._docstring_format_args = docstring_format_args
self._base_operation = base_operation
def __get__(self, obj, owner=None):
retfunc = _partialmethod(self._base_operation, op=self._name)
# Required attributes that will exist.
retfunc.__name__ = self._name
retfunc.__qualname__ = ".".join([owner.__name__, self._name])
retfunc.__module__ = self._base_operation.__module__
if self._base_operation.__doc__ is not None:
retfunc.__doc__ = self._base_operation.__doc__.format(
cls=owner.__name__,
op=self._name,
**self._docstring_format_args,
)
retfunc.__annotations__ = self._base_operation.__annotations__.copy()
retfunc.__annotations__.pop("op", None)
retfunc_params = [
v
for k, v in inspect.signature(
self._base_operation
).parameters.items()
if k != "op"
]
retfunc.__signature__ = inspect.Signature(retfunc_params)
setattr(owner, self._name, retfunc)
if obj is None:
return getattr(owner, self._name)
else:
return getattr(obj, self._name)
def _should_define_operation(cls, operation, base_operation_name):
if operation not in dir(cls):
return True
# If the class doesn't override the base operation we stick to whatever
# parent implementation exists.
if base_operation_name not in cls.__dict__:
return False
# At this point we know that the class has the operation defined but it
# also overrides the base operation. Since this function is called before
# the operation is defined on the current class, we know that it inherited
# the operation from a parent. We therefore have three possibilities:
# 1. A parent class manually defined the operation. That override takes
# precedence even if the current class defined the base operation.
# 2. A parent class has an auto-generated operation, i.e. it is of type
# Operation and was created by OperationMixin.__init_subclass__. The
# current class must override it so that its base operation is used
# rather than the parent's base operation.
# 3. The method is defined for all classes, i.e. it is a method of object.
for base_cls in cls.__mro__:
# We always override methods defined for object.
if base_cls is object:
return True
# The first attribute in the MRO is the one that will be used.
if operation in base_cls.__dict__:
return isinstance(base_cls.__dict__[operation], Operation)
# This line should be unreachable since we know the attribute exists
# somewhere in the MRO if the for loop was entered.
assert False, "Operation attribute not found in hierarchy."
def _create_delegating_mixin(
mixin_name,
docstring,
category_name,
base_operation_name,
supported_operations,
):
"""Factory for mixins defining collections of delegated operations.
This function generates mixins based on two common paradigms in cuDF:
1. libcudf groups many operations into categories using a common API. These
APIs usually accept an enum to delineate the specific operation to
perform, e.g. binary operations use the `binary_operator` enum when
calling the `binary_operation` function. cuDF Python mimics this
structure by having operations within a category delegate to a common
internal function (e.g. DataFrame.__add__ calls DataFrame._binaryop).
2. Many cuDF classes implement similar operations (e.g. `sum`) via
delegation to lower-level APIs before reaching a libcudf C++ function
call. As a result, many API function calls actually involve multiple
delegations to lower-level APIs that can look essentially identical. An
example of such a sequence would be DataFrame.sum -> DataFrame._reduce
-> Column.sum -> Column._reduce -> libcudf.
This factory creates mixins for a category of operations implemented via
this delegation pattern. The resulting mixins make it easy to share common
functions across various classes while also providing a common entrypoint
for implementing the centralized logic for a given category of operations.
Its usage is best demonstrated by example below.
Parameters
----------
mixin_name : str
The name of the class. This argument should be the same as the object
that this function's output is assigned to, e.g.
:code:`Baz = _create_delegating_mixin("Baz", ...)`.
docstring : str
The documentation string for the mixin class.
category_name : str
The category of operations for which a mixin is being created. This
name will be used to define or access the following attributes as shown
in the example below:
- f'_{category_name}_DOCSTRINGS'
- f'_VALID_{category_name}S' # The subset of ops a subclass allows
- f'_SUPPORTED_{category_name}S' # The ops supported by the mixin
base_operation_name : str
The name given to the core function implementing this category of
operations. The corresponding function is the entrypoint for child
classes.
supported_operations : Set[str]
The set of valid operations that subclasses of the resulting mixin may
request to be implemented.
Examples
--------
>>> # The class below:
>>> class Person:
... def _greet(self, op):
... print(op)
...
... def hello(self):
... self._greet("hello")
...
... def goodbye(self):
... self._greet("goodbye")
>>> # can be rewritten using a delegating mixin as follows:
>>> Greeter = _create_delegating_mixin(
... "Greeter", "", "GREETING", "_greet", {"hello", "goodbye", "hey"}
... )
>>> # The `hello` and `goodbye` methods will now be automatically generated
>>> # for the Person class below.
>>> class Person(Greeter):
... _VALID_GREETINGS = {"hello", "goodbye"}
...
... def _greet(self, op: str):
... '''Say {op}.'''
... print(op)
>>> mom = Person()
>>> mom.hello()
hello
>>> # The Greeter class could also enable the `hey` method, but Person did
>>> # not include it in the _VALID_GREETINGS set so it will not exist.
>>> mom.hey()
Traceback (most recent call last):
...
AttributeError: 'Person' object has no attribute 'hey'
>>> # The docstrings for each method are generated by formatting the _greet
>>> # docstring with the operation name as well as any additional keys
>>> # provided via the _GREETING_DOCSTRINGS parameter.
>>> print(mom.hello.__doc__)
Say hello.
"""
# The first two attributes may be defined on subclasses of the generated
# OperationMixin to indicate valid attributes and parameters to use when
# formatting docstrings. The supported_attr will be defined on the
# OperationMixin itself to indicate what operations its subclass may
# inherit from it.
validity_attr = f"_VALID_{category_name}S"
docstring_attr = f"_{category_name}_DOCSTRINGS"
supported_attr = f"_SUPPORTED_{category_name}S"
class OperationMixin:
@classmethod
def __init_subclass__(cls):
# Support composition of various OperationMixins. Note that since
# this __init_subclass__ is defined on mixins, it does not prohibit
# classes that inherit it from implementing this method as well as
# long as those implementations also include this super call.
super().__init_subclass__()
# Only add the valid set of operations for a particular class.
valid_operations = set()
for base_cls in cls.__mro__:
# Check for sentinel indicating that all operations are valid.
valid_operations |= getattr(base_cls, validity_attr, set())
invalid_operations = valid_operations - supported_operations
assert (
len(invalid_operations) == 0
), f"Invalid requested operations: {invalid_operations}"
base_operation = getattr(cls, base_operation_name)
for operation in valid_operations:
if _should_define_operation(
cls, operation, base_operation_name
):
docstring_format_args = getattr(
cls, docstring_attr, {}
).get(operation, {})
op_attr = Operation(
operation, docstring_format_args, base_operation
)
setattr(cls, operation, op_attr)
OperationMixin.__name__ = mixin_name
OperationMixin.__qualname__ = mixin_name
OperationMixin.__doc__ = docstring
def _operation(self, op: str, *args, **kwargs):
raise NotImplementedError
_operation.__name__ = base_operation_name
_operation.__qualname__ = ".".join([mixin_name, base_operation_name])
_operation.__doc__ = (
f"The core {category_name.lower()} function. Must be overridden by "
"subclasses, the default implementation raises a NotImplementedError."
)
setattr(OperationMixin, base_operation_name, _operation)
# Making this attribute available makes it easy for subclasses to indicate
# that all supported operations for this mixin are valid.
setattr(OperationMixin, supported_attr, supported_operations)
return OperationMixin
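# A minimal, hedged sketch of the caching behaviour of the Operation descriptor:
# the first attribute access materializes a plain function on the owning class,
# so later lookups bypass __get__ entirely. The mixin, class, and helper names
# below are hypothetical illustrations.
def _example_operation_caching():
    Demo = _create_delegating_mixin(
        "Demo", "Example mixin.", "DEMO", "_do", {"ping"}
    )

    class Widget(Demo):
        _VALID_DEMOS = {"ping"}

        def _do(self, op: str):
            """Run {op}."""
            return op

    before = type(Widget.__dict__["ping"])  # the Operation descriptor
    result = Widget().ping()  # first call replaces it with a concrete function
    after = type(Widget.__dict__["ping"])  # a plain function from now on
    return before, result, after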
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/mixins/binops.py
|
# Copyright (c) 2022, NVIDIA CORPORATION.
from .mixin_factory import _create_delegating_mixin
BinaryOperand = _create_delegating_mixin(
"BinaryOperand",
"Mixin encapsulating binary operations.",
"BINARY_OPERATION",
"_binaryop",
{
# Numeric operations.
"__add__",
"__sub__",
"__mul__",
"__matmul__",
"__truediv__",
"__floordiv__",
"__mod__",
# "__divmod__", # Not yet implemented
"__pow__",
# "__lshift__", # Not yet implemented
# "__rshift__", # Not yet implemented
"__and__",
"__xor__",
"__or__",
# Reflected numeric operations.
"__radd__",
"__rsub__",
"__rmul__",
"__rmatmul__",
"__rtruediv__",
"__rfloordiv__",
"__rmod__",
# "__rdivmod__", # Not yet implemented
"__rpow__",
# "__rlshift__", # Not yet implemented
# "__rrshift__", # Not yet implemented
"__rand__",
"__rxor__",
"__ror__",
# Rich comparison operations.
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
},
)
# TODO: See if there is a better approach to these two issues: 1) The mixin
# assumes a single standard parameter, whereas binops have two, and 2) we need
# a way to determine reflected vs normal ops.
def _binaryop(self, other, op: str):
"""The core binary_operation function.
Must be overridden by subclasses; the default implementation raises a
NotImplementedError.
"""
raise NotImplementedError
def _check_reflected_op(op):
if reflect := op[2] == "r" and op != "__rshift__":
op = op[:2] + op[3:]
return reflect, op
BinaryOperand._binaryop = _binaryop
BinaryOperand._check_reflected_op = staticmethod(_check_reflected_op)
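# A minimal, hedged sketch of what _check_reflected_op reports for a few
# operator names; the helper name below is a hypothetical illustration.
def _example_check_reflected_op():
    assert _check_reflected_op("__add__") == (False, "__add__")
    assert _check_reflected_op("__radd__") == (True, "__add__")
    # "__rshift__" has an "r" in that position only by spelling, so it is not
    # treated as a reflected operation.
    assert _check_reflected_op("__rshift__") == (False, "__rshift__")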
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/mixins/__init__.py
|
# Copyright (c) 2022, NVIDIA CORPORATION.
from .binops import BinaryOperand
from .reductions import Reducible
from .scans import Scannable
__all__ = ["BinaryOperand", "Reducible", "Scannable"]
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/categorical.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
from __future__ import annotations
import warnings
from collections import abc
from functools import cached_property
from typing import TYPE_CHECKING, Any, Mapping, Optional, Sequence, Tuple, cast
import numpy as np
import pandas as pd
import pyarrow as pa
from numba import cuda
from typing_extensions import Self
import cudf
from cudf import _lib as libcudf
from cudf._lib.transform import bools_to_mask
from cudf._typing import ColumnBinaryOperand, ColumnLike, Dtype, ScalarLike
from cudf.api.types import is_categorical_dtype, is_interval_dtype
from cudf.core.buffer import Buffer
from cudf.core.column import column
from cudf.core.column.methods import ColumnMethods
from cudf.core.dtypes import CategoricalDtype
from cudf.utils.dtypes import (
is_mixed_with_object_dtype,
min_signed_type,
min_unsigned_type,
)
if TYPE_CHECKING:
from cudf._typing import SeriesOrIndex, SeriesOrSingleColumnIndex
from cudf.core.column import (
ColumnBase,
DatetimeColumn,
NumericalColumn,
StringColumn,
TimeDeltaColumn,
)
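# Sentinel code for missing values: it matches the code pandas assigns to nulls
# in Categorical.codes, and _decode() below maps it back to None.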
_DEFAULT_CATEGORICAL_VALUE = -1
class CategoricalAccessor(ColumnMethods):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is an inplace operation,
while all methods return new categorical data by default.
Parameters
----------
column : Column
parent : Series or CategoricalIndex
Examples
--------
>>> s = cudf.Series([1,2,3], dtype='category')
>>> s
0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]
>>> s.cat.categories
Int64Index([1, 2, 3], dtype='int64')
>>> s.cat.reorder_categories([3,2,1])
0 1
1 2
2 3
dtype: category
Categories (3, int64): [3, 2, 1]
>>> s.cat.remove_categories([1])
0 <NA>
1 2
2 3
dtype: category
Categories (2, int64): [2, 3]
>>> s.cat.set_categories(list('abcde'))
0 <NA>
1 <NA>
2 <NA>
dtype: category
Categories (5, object): ['a', 'b', 'c', 'd', 'e']
>>> s.cat.as_ordered()
0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]
>>> s.cat.as_unordered()
0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]
"""
_column: CategoricalColumn
def __init__(self, parent: SeriesOrSingleColumnIndex):
if not is_categorical_dtype(parent.dtype):
raise AttributeError(
"Can only use .cat accessor with a 'category' dtype"
)
super().__init__(parent=parent)
@property
def categories(self) -> "cudf.core.index.GenericIndex":
"""
The categories of this categorical.
"""
return cudf.core.index.as_index(self._column.categories)
@property
def codes(self) -> "cudf.Series":
"""
Return Series of codes as well as the index.
"""
index = (
self._parent.index
if isinstance(self._parent, cudf.Series)
else None
)
return cudf.Series(self._column.codes, index=index)
@property
def ordered(self) -> bool:
"""
Whether the categories have an ordered relationship.
"""
return self._column.ordered
def as_ordered(self, inplace: bool = False) -> Optional[SeriesOrIndex]:
"""
Set the Categorical to be ordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute
in-place or return a copy of this
categorical with ordered set to True.
.. deprecated:: 23.02
The `inplace` parameter is deprecated and
will be removed in a future version of cudf.
Setting categories as ordered will always
return a new Categorical object.
Returns
-------
Categorical
Ordered Categorical or None if inplace.
Examples
--------
>>> import cudf
>>> s = cudf.Series([10, 1, 1, 2, 10, 2, 10], dtype="category")
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.as_ordered()
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1 < 2 < 10]
>>> s.cat.as_ordered(inplace=True)
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1 < 2 < 10]
"""
if inplace:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"The inplace parameter is deprecated and will be removed in a "
"future release. set_ordered will always return a new Series "
"in the future.",
FutureWarning,
)
return self._return_or_inplace(
self._column.as_ordered(), inplace=inplace
)
def as_unordered(self, inplace: bool = False) -> Optional[SeriesOrIndex]:
"""
Set the Categorical to be unordered.
Parameters
----------
inplace : bool, default False
Whether or not to set the ordered attribute
in-place or return a copy of this
categorical with ordered set to False.
.. deprecated:: 23.02
The `inplace` parameter is deprecated and
will be removed in a future version of cudf.
Setting categories as unordered will always
return a new Categorical object.
Returns
-------
Categorical
Unordered Categorical or None if inplace.
Examples
--------
>>> import cudf
>>> s = cudf.Series([10, 1, 1, 2, 10, 2, 10], dtype="category")
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s = s.cat.as_ordered()
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1 < 2 < 10]
>>> s.cat.as_unordered()
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.as_unordered(inplace=True)
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
"""
if inplace:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"The inplace parameter is deprecated and will be removed in a "
"future release. set_ordered will always return a new Series "
"in the future.",
FutureWarning,
)
return self._return_or_inplace(
self._column.as_unordered(), inplace=inplace
)
def add_categories(
self, new_categories: Any, inplace: bool = False
) -> Optional[SeriesOrIndex]:
"""
Add new categories.
`new_categories` will be included at the last/highest
place in the categories and will be unused directly
after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
inplace : bool, default False
Whether or not to add the categories inplace
or return a copy of this categorical with
added categories.
.. deprecated:: 23.04
The `inplace` parameter is deprecated and
will be removed in a future version of cudf.
Adding categories will always return a
new Categorical object.
Returns
-------
cat
Categorical with new categories added or
None if inplace.
Examples
--------
>>> import cudf
>>> s = cudf.Series([1, 2], dtype="category")
>>> s
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
>>> s.cat.add_categories([0, 3, 4])
0 1
1 2
dtype: category
Categories (5, int64): [1, 2, 0, 3, 4]
>>> s
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
>>> s.cat.add_categories([0, 3, 4], inplace=True)
>>> s
0 1
1 2
dtype: category
Categories (5, int64): [1, 2, 0, 3, 4]
"""
if inplace:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"The `inplace` parameter in cudf.Series.cat.add_categories "
"is deprecated and will be removed in a future version of "
"cudf. Adding categories will always return a new "
"Categorical object.",
FutureWarning,
)
old_categories = self._column.categories
new_categories = column.as_column(
new_categories,
dtype=old_categories.dtype if len(new_categories) == 0 else None,
)
if is_mixed_with_object_dtype(old_categories, new_categories):
raise TypeError(
f"cudf does not support adding categories with existing "
f"categories of dtype `{old_categories.dtype}` and new "
f"categories of dtype `{new_categories.dtype}`, please "
f"type-cast new_categories to the same type as "
f"existing categories."
)
common_dtype = np.find_common_type(
[old_categories.dtype, new_categories.dtype], []
)
new_categories = new_categories.astype(common_dtype)
old_categories = old_categories.astype(common_dtype)
if old_categories.isin(new_categories).any():
raise ValueError("new categories must not include old categories")
new_categories = old_categories.append(new_categories)
out_col = self._column
if not out_col._categories_equal(new_categories):
out_col = out_col._set_categories(new_categories)
return self._return_or_inplace(out_col, inplace=inplace)
def remove_categories(
self,
removals: Any,
inplace: bool = False,
) -> Optional[SeriesOrIndex]:
"""
Remove the specified categories.
`removals` must be included in the
old categories. Values which were in the
removed categories will be set to null.
Parameters
----------
removals : category or list-like of category
The categories which should be removed.
inplace : bool, default False
Whether or not to remove the categories
inplace or return a copy of this categorical
with removed categories.
.. deprecated:: 23.04
The `inplace` parameter is deprecated and
will be removed in a future version of cudf.
Removing categories will always return a
new Categorical object.
Returns
-------
cat
Categorical with removed categories or None
if inplace.
Examples
--------
>>> import cudf
>>> s = cudf.Series([10, 1, 1, 2, 10, 2, 10], dtype="category")
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.remove_categories([1])
0 10
1 <NA>
2 <NA>
3 2
4 10
5 2
6 10
dtype: category
Categories (2, int64): [2, 10]
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.remove_categories([10], inplace=True)
>>> s
0 <NA>
1 1
2 1
3 2
4 <NA>
5 2
6 <NA>
dtype: category
Categories (2, int64): [1, 2]
"""
if inplace:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"The `inplace` parameter in "
"cudf.Series.cat.remove_categories is deprecated and "
"will be removed in a future version of cudf. "
"Removing categories will always return a new "
"Categorical object.",
FutureWarning,
)
cats = self.categories.to_series()
removals = cudf.Series(removals, dtype=cats.dtype)
removals_mask = removals.isin(cats)
# ensure all the removals are in the current categories
# list. If not, raise an error to match Pandas behavior
if not removals_mask.all():
vals = removals[~removals_mask].to_numpy()
raise ValueError(f"removals must all be in old categories: {vals}")
new_categories = cats[~cats.isin(removals)]._column
out_col = self._column
if not out_col._categories_equal(new_categories):
out_col = out_col._set_categories(new_categories)
return self._return_or_inplace(out_col, inplace=inplace)
def set_categories(
self,
new_categories: Any,
ordered: bool = False,
rename: bool = False,
inplace: bool = False,
) -> Optional[SeriesOrIndex]:
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which
will result in unused categories) or remove old categories
(which results in values set to null). If `rename==True`,
the categories will simply be renamed (fewer or more items
than in the old categories will result in values set to null or
in unused categories, respectively).
This method can be used to perform more than one action
of adding, removing, and reordering simultaneously and
is therefore faster than performing the individual steps
via the more specialised methods.
On the other hand, this method does not do checks
(e.g., whether the old categories are included in the
new categories on a reorder), which can result in
surprising changes.
Parameters
----------
new_categories : list-like
The categories in new order.
ordered : bool, default None
Whether or not the categorical is treated as
an ordered categorical. If not given, do
not change the ordered information.
rename : bool, default False
Whether or not the `new_categories` should be
considered as a rename of the old categories
or as reordered categories.
inplace : bool, default False
Whether or not to reorder the categories in-place
or return a copy of this categorical with
reordered categories.
.. deprecated:: 23.04
The `inplace` parameter is deprecated and
will be removed in a future version of cudf.
Setting categories will always return a
new Categorical object.
Returns
-------
cat
Categorical with reordered categories
or None if inplace.
Examples
--------
>>> import cudf
>>> s = cudf.Series([1, 1, 2, 10, 2, 10], dtype='category')
>>> s
0 1
1 1
2 2
3 10
4 2
5 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.set_categories([1, 10])
0 1
1 1
2 <NA>
3 10
4 <NA>
5 10
dtype: category
Categories (2, int64): [1, 10]
>>> s.cat.set_categories([1, 10], inplace=True)
>>> s
0 1
1 1
2 <NA>
3 10
4 <NA>
5 10
dtype: category
Categories (2, int64): [1, 10]
"""
if inplace:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"The `inplace` parameter in cudf.Series.cat.set_categories is "
"deprecated and will be removed in a future version of cudf. "
"Setting categories will always return a new Categorical "
"object.",
FutureWarning,
)
return self._return_or_inplace(
self._column.set_categories(
new_categories=new_categories, ordered=ordered, rename=rename
),
inplace=inplace,
)
def reorder_categories(
self,
new_categories: Any,
ordered: bool = False,
inplace: bool = False,
) -> Optional[SeriesOrIndex]:
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories
and no new category items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated
as an ordered categorical. If not given, do
not change the ordered information.
inplace : bool, default False
Whether or not to reorder the categories
inplace or return a copy of this categorical
with reordered categories.
.. deprecated:: 23.04
The `inplace` parameter is deprecated and
will be removed in a future version of cudf.
Reordering categories will always return a
new Categorical object.
Returns
-------
cat
Categorical with reordered categories or
None if inplace.
Raises
------
ValueError
If the new categories do not contain all old
category items or any new ones.
Examples
--------
>>> import cudf
>>> s = cudf.Series([10, 1, 1, 2, 10, 2, 10], dtype="category")
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.reorder_categories([10, 1, 2])
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [10, 1, 2]
>>> s.cat.reorder_categories([10, 1])
ValueError: items in new_categories are not the same as in
old categories
"""
if inplace:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"The `inplace` parameter in "
"cudf.Series.cat.reorder_categories is deprecated "
"and will be removed in a future version of cudf. "
"Reordering categories will always return a new "
"Categorical object.",
FutureWarning,
)
return self._return_or_inplace(
self._column.reorder_categories(new_categories, ordered=ordered),
inplace=inplace,
)
class CategoricalColumn(column.ColumnBase):
"""
Implements operations for Columns of Categorical type
Parameters
----------
dtype : CategoricalDtype
mask : Buffer
The validity mask
offset : int
Data offset
children : Tuple[ColumnBase]
A single non-null column containing the codes of the
categorical; the categories are stored on the dtype
"""
dtype: cudf.core.dtypes.CategoricalDtype
_codes: Optional[NumericalColumn]
_children: Tuple[NumericalColumn]
_VALID_BINARY_OPERATIONS = {
"__eq__",
"__ne__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
}
def __init__(
self,
dtype: CategoricalDtype,
mask: Optional[Buffer] = None,
size: Optional[int] = None,
offset: int = 0,
null_count: Optional[int] = None,
children: Tuple["column.ColumnBase", ...] = (),
):
if size is None:
for child in children:
assert child.offset == 0
assert child.base_mask is None
size = children[0].size
size = size - offset
if isinstance(dtype, pd.api.types.CategoricalDtype):
dtype = CategoricalDtype.from_pandas(dtype)
if not isinstance(dtype, CategoricalDtype):
raise ValueError("dtype must be instance of CategoricalDtype")
super().__init__(
data=None,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
self._codes = None
@property
def base_size(self) -> int:
return int(
(self.base_children[0].size) / self.base_children[0].dtype.itemsize
)
def __contains__(self, item: ScalarLike) -> bool:
try:
self._encode(item)
except ValueError:
return False
return self._encode(item) in self.as_numerical
def set_base_data(self, value):
if value is not None:
raise RuntimeError(
"CategoricalColumns do not use data attribute of Column, use "
"`set_base_children` instead"
)
else:
super().set_base_data(value)
def _process_values_for_isin(
self, values: Sequence
) -> Tuple[ColumnBase, ColumnBase]:
lhs = self
# We need to convert values to same type as self,
# hence passing dtype=self.dtype
rhs = cudf.core.column.as_column(values, dtype=self.dtype)
return lhs, rhs
def set_base_mask(self, value: Optional[Buffer]):
super().set_base_mask(value)
self._codes = None
def set_base_children(self, value: Tuple[ColumnBase, ...]):
super().set_base_children(value)
self._codes = None
@property
def children(self) -> Tuple[NumericalColumn]:
if self._children is None:
codes_column = self.base_children[0]
start = self.offset * codes_column.dtype.itemsize
end = start + self.size * codes_column.dtype.itemsize
codes_column = cast(
cudf.core.column.NumericalColumn,
column.build_column(
data=codes_column.base_data[start:end],
dtype=codes_column.dtype,
size=self.size,
),
)
self._children = (codes_column,)
return self._children
@property
def as_numerical(self) -> NumericalColumn:
return cast(
cudf.core.column.NumericalColumn,
column.build_column(
data=self.codes.data, dtype=self.codes.dtype, mask=self.mask
),
)
@property
def categories(self) -> ColumnBase:
return self.dtype.categories._values
@categories.setter
def categories(self, value):
self._dtype = CategoricalDtype(
categories=value, ordered=self.dtype.ordered
)
@property
def codes(self) -> NumericalColumn:
if self._codes is None:
self._codes = self.children[0].set_mask(self.mask)
return cast(cudf.core.column.NumericalColumn, self._codes)
@property
def ordered(self) -> bool:
return self.dtype.ordered
@ordered.setter
def ordered(self, value: bool):
self.dtype.ordered = value
def unary_operator(self, unaryop: str):
raise TypeError(
f"Series of dtype `category` cannot perform the operation: "
f"{unaryop}"
)
def __setitem__(self, key, value):
if cudf.api.types.is_scalar(
value
) and cudf._lib.scalar._is_null_host_scalar(value):
to_add_categories = 0
else:
if cudf.api.types.is_scalar(value):
arr = [value]
else:
arr = value
to_add_categories = len(
cudf.Index(arr, nan_as_null=False).difference(self.categories)
)
if to_add_categories > 0:
raise TypeError(
"Cannot setitem on a Categorical with a new "
"category, set the categories first"
)
if cudf.api.types.is_scalar(value):
value = self._encode(value) if value is not None else value
else:
value = cudf.core.column.as_column(value).astype(self.dtype)
value = value.codes
codes = self.codes
codes[key] = value
out = cudf.core.column.build_categorical_column(
categories=self.categories,
codes=codes,
mask=codes.base_mask,
size=codes.size,
offset=self.offset,
ordered=self.ordered,
)
self._mimic_inplace(out, inplace=True)
def _fill(
self,
fill_value: ScalarLike,
begin: int,
end: int,
inplace: bool = False,
) -> Self:
if end <= begin or begin >= self.size:
return self if inplace else self.copy()
fill_code = self._encode(fill_value)
fill_scalar = cudf._lib.scalar.as_device_scalar(
fill_code, self.codes.dtype
)
result = self if inplace else self.copy()
libcudf.filling.fill_in_place(result.codes, begin, end, fill_scalar)
return result
def slice(
self, start: int, stop: int, stride: Optional[int] = None
) -> Self:
codes = self.codes.slice(start, stop, stride)
return cast(
Self,
cudf.core.column.build_categorical_column(
categories=self.categories,
codes=cudf.core.column.build_column(
codes.base_data, dtype=codes.dtype
),
mask=codes.base_mask,
ordered=self.ordered,
size=codes.size,
offset=codes.offset,
),
)
def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase:
other = self._wrap_binop_normalization(other)
# TODO: This is currently just here to make mypy happy, but eventually
# we'll need to properly establish the APIs for these methods.
if not isinstance(other, CategoricalColumn):
raise ValueError
# Note: at this stage we are guaranteed that the dtypes are equal.
if not self.ordered and op not in {"__eq__", "__ne__", "NULL_EQUALS"}:
raise TypeError(
"The only binary operations supported by unordered "
"categorical columns are equality and inequality."
)
return self.as_numerical._binaryop(other.as_numerical, op)
def normalize_binop_value(self, other: ScalarLike) -> CategoricalColumn:
if isinstance(other, column.ColumnBase):
if not isinstance(other, CategoricalColumn):
return NotImplemented
if other.dtype != self.dtype:
raise TypeError(
"Categoricals can only compare with the same type"
)
return other
ary = column.full(
len(self), self._encode(other), dtype=self.codes.dtype
)
return column.build_categorical_column(
categories=self.dtype.categories._values,
codes=column.as_column(ary),
mask=self.base_mask,
ordered=self.dtype.ordered,
)
def sort_values(
self, ascending: bool = True, na_position="last"
) -> CategoricalColumn:
codes = self.as_numerical.sort_values(ascending, na_position)
col = column.build_categorical_column(
categories=self.dtype.categories._values,
codes=column.build_column(codes.base_data, dtype=codes.dtype),
mask=codes.base_mask,
size=codes.size,
ordered=self.dtype.ordered,
)
return col
def element_indexing(self, index: int) -> ScalarLike:
val = self.as_numerical.element_indexing(index)
return self._decode(int(val)) if val is not None else val
@property
def __cuda_array_interface__(self) -> Mapping[str, Any]:
raise TypeError(
"Categorical does not support `__cuda_array_interface__`."
" Please consider using `.codes` or `.categories`"
" if you need this functionality."
)
def to_pandas(
self, index: Optional[pd.Index] = None, **kwargs
) -> pd.Series:
if self.categories.dtype.kind == "f":
new_mask = bools_to_mask(self.notnull())
col = column.build_categorical_column(
categories=self.categories,
codes=column.as_column(self.codes, dtype=self.codes.dtype),
mask=new_mask,
ordered=self.dtype.ordered,
size=self.codes.size,
)
else:
col = self
signed_dtype = min_signed_type(len(col.categories))
codes = (
col.codes.astype(signed_dtype)
.fillna(_DEFAULT_CATEGORICAL_VALUE)
.values_host
)
if is_interval_dtype(col.categories.dtype):
# leaving out dropna because it temporarily changes an interval
# index into a struct and throws off results.
# TODO: work on interval index dropna
categories = col.categories.to_pandas()
else:
categories = col.categories.dropna(drop_nan=True).to_pandas()
data = pd.Categorical.from_codes(
codes, categories=categories, ordered=col.ordered
)
return pd.Series(data, index=index)
def to_arrow(self) -> pa.Array:
"""Convert to PyArrow Array."""
# arrow doesn't support unsigned codes
signed_type = (
min_signed_type(self.codes.max())
if self.codes.size > 0
else np.int8
)
codes = self.codes.astype(signed_type)
categories = self.categories
out_indices = codes.to_arrow()
out_dictionary = categories.to_arrow()
return pa.DictionaryArray.from_arrays(
out_indices,
out_dictionary,
ordered=self.ordered,
)
@property
def values_host(self) -> np.ndarray:
"""
Return a numpy representation of the CategoricalColumn.
"""
return self.to_pandas().values
@property
def values(self):
"""
Return a CuPy representation of the CategoricalColumn.
"""
raise NotImplementedError("cudf.Categorical is not yet implemented")
def clip(self, lo: ScalarLike, hi: ScalarLike) -> "column.ColumnBase":
return (
self.astype(self.categories.dtype).clip(lo, hi).astype(self.dtype)
)
def data_array_view(
self, *, mode="write"
) -> cuda.devicearray.DeviceNDArray:
return self.codes.data_array_view(mode=mode)
def unique(self) -> CategoricalColumn:
codes = self.as_numerical.unique()
return column.build_categorical_column(
categories=self.categories,
codes=column.build_column(codes.base_data, dtype=codes.dtype),
mask=codes.base_mask,
offset=codes.offset,
size=codes.size,
ordered=self.ordered,
)
def _encode(self, value) -> ScalarLike:
return self.categories.find_first_value(value)
def _decode(self, value: int) -> ScalarLike:
if value == _DEFAULT_CATEGORICAL_VALUE:
return None
return self.categories.element_indexing(value)
def find_and_replace(
self,
to_replace: ColumnLike,
replacement: ColumnLike,
all_nan: bool = False,
) -> CategoricalColumn:
"""
Return col with *to_replace* replaced with *replacement*.
"""
to_replace_col = column.as_column(to_replace)
if len(to_replace_col) == to_replace_col.null_count:
to_replace_col = to_replace_col.astype(self.categories.dtype)
replacement_col = column.as_column(replacement)
if len(replacement_col) == replacement_col.null_count:
replacement_col = replacement_col.astype(self.categories.dtype)
if type(to_replace_col) != type(replacement_col):
raise TypeError(
f"to_replace and value should be of same types,"
f"got to_replace dtype: {to_replace_col.dtype} and "
f"value dtype: {replacement_col.dtype}"
)
df = cudf.DataFrame._from_data(
{"old": to_replace_col, "new": replacement_col}
)
df = df.drop_duplicates(subset=["old"], keep="last", ignore_index=True)
if df._data["old"].null_count == 1:
fill_value = (
df._data["new"]
.apply_boolean_mask(df._data["old"].isnull())
.element_indexing(0)
)
# TODO: This line of code does not work because we cannot use the
# `in` operator on self.categories (which is a column). mypy
# realizes that this is wrong because __iter__ is not implemented.
# However, it seems that this functionality has been broken for a
# long time so for now we're just having mypy ignore and we'll come
# back to this.
if fill_value in self.categories: # type: ignore
replaced = self.fillna(fill_value)
else:
new_categories = self.categories.append(
column.as_column([fill_value])
)
replaced = self._set_categories(new_categories)
replaced = replaced.fillna(fill_value)
df = df.dropna(subset=["old"])
to_replace_col = df._data["old"]
replacement_col = df._data["new"]
else:
replaced = self
if df._data["new"].null_count > 0:
drop_values = df._data["old"].apply_boolean_mask(
df._data["new"].isnull()
)
cur_categories = replaced.categories
new_categories = cur_categories.apply_boolean_mask(
~cudf.Series(cur_categories.isin(drop_values))
)
replaced = replaced._set_categories(new_categories)
df = df.dropna(subset=["new"])
to_replace_col = df._data["old"]
replacement_col = df._data["new"]
# create a dataframe containing the pre-replacement categories
# and a column with the appropriate labels replaced.
# The index of this dataframe represents the original
# ints that map to the categories
cats_col = column.as_column(replaced.dtype.categories)
old_cats = cudf.DataFrame._from_data(
{
"cats": cats_col,
"cats_replace": cats_col.find_and_replace(
to_replace_col, replacement_col
),
}
)
# Construct the new categorical labels
# If a category is being replaced by an existing one, we
# want to map it to None. If it's totally new, we want to
# map it to the new label it is to be replaced by
dtype_replace = cudf.Series._from_data({None: replacement_col})
dtype_replace[dtype_replace.isin(cats_col)] = None
new_cats_col = cats_col.find_and_replace(
to_replace_col, dtype_replace._column
)
# anything we mapped to None, we want to now filter out since
# those categories don't exist anymore
# Resetting the index creates a column 'index' that associates
# the original integers to the new labels
bmask = new_cats_col.notnull()
new_cats_col = new_cats_col.apply_boolean_mask(bmask)
new_cats = cudf.DataFrame._from_data(
{
"index": cudf.core.column.arange(len(new_cats_col)),
"cats": new_cats_col,
}
)
# old_cats contains replaced categories and the ints that
# previously mapped to those categories and the index of
# new_cats is a RangeIndex that contains the new ints
catmap = old_cats.merge(
new_cats, left_on="cats_replace", right_on="cats", how="inner"
)
# The index of this frame is now the old ints, but the column
# named 'index', which came from the filtered categories,
# contains the new ints that we need to map to
to_replace_col = column.as_column(catmap.index).astype(
replaced.codes.dtype
)
replacement_col = catmap._data["index"].astype(replaced.codes.dtype)
replaced = column.as_column(replaced.codes)
output = libcudf.replace.replace(
replaced, to_replace_col, replacement_col
)
return column.build_categorical_column(
categories=new_cats["cats"],
codes=column.build_column(output.base_data, dtype=output.dtype),
mask=output.base_mask,
offset=output.offset,
size=output.size,
ordered=self.dtype.ordered,
)
def isnull(self) -> ColumnBase:
"""
Identify missing values in a CategoricalColumn.
"""
result = libcudf.unary.is_null(self)
if self.categories.dtype.kind == "f":
# Need to consider `np.nan` values in case
# of an underlying float column
categories = libcudf.unary.is_nan(self.categories)
if categories.any():
code = self._encode(np.nan)
result = result | (self.codes == cudf.Scalar(code))
return result
def notnull(self) -> ColumnBase:
"""
Identify non-missing values in a CategoricalColumn.
"""
result = libcudf.unary.is_valid(self)
if self.categories.dtype.kind == "f":
# Need to consider `np.nan` values in case
# of an underlying float column
categories = libcudf.unary.is_nan(self.categories)
if categories.any():
code = self._encode(np.nan)
result = result & (self.codes != cudf.Scalar(code))
return result
def fillna(
self,
fill_value: Any = None,
method: Any = None,
dtype: Optional[Dtype] = None,
) -> CategoricalColumn:
"""
Fill null values with *fill_value*
"""
if not self.nullable:
return self
if fill_value is not None:
fill_is_scalar = np.isscalar(fill_value)
if fill_is_scalar:
if fill_value == _DEFAULT_CATEGORICAL_VALUE:
fill_value = self.codes.dtype.type(fill_value)
else:
try:
fill_value = self._encode(fill_value)
fill_value = self.codes.dtype.type(fill_value)
except ValueError as err:
err_msg = "fill value must be in categories"
raise ValueError(err_msg) from err
else:
fill_value = column.as_column(fill_value, nan_as_null=False)
if isinstance(fill_value, CategoricalColumn):
if self.dtype != fill_value.dtype:
raise TypeError(
"Cannot set a Categorical with another, "
"without identical categories"
)
# TODO: only required if fill_value has a subset of the
# categories:
fill_value = fill_value._set_categories(
self.categories,
is_unique=True,
)
fill_value = column.as_column(fill_value.codes).astype(
self.codes.dtype
)
return super().fillna(value=fill_value, method=method)
def indices_of(
self, value: ScalarLike
) -> cudf.core.column.NumericalColumn:
return self.as_numerical.indices_of(self._encode(value))
@property
def is_monotonic_increasing(self) -> bool:
return bool(self.ordered) and self.as_numerical.is_monotonic_increasing
@property
def is_monotonic_decreasing(self) -> bool:
return bool(self.ordered) and self.as_numerical.is_monotonic_decreasing
def as_categorical_column(self, dtype: Dtype) -> CategoricalColumn:
if isinstance(dtype, str) and dtype == "category":
return self
if (
isinstance(
dtype, (cudf.core.dtypes.CategoricalDtype, pd.CategoricalDtype)
)
and (dtype.categories is None)
and (dtype.ordered is None)
):
return self
if isinstance(dtype, pd.CategoricalDtype):
dtype = CategoricalDtype(
categories=dtype.categories, ordered=dtype.ordered
)
if not isinstance(dtype, CategoricalDtype):
raise ValueError("dtype must be CategoricalDtype")
if not isinstance(self.categories, type(dtype.categories._values)):
# If both categories are of different Column types,
# return a column full of Nulls.
return _create_empty_categorical_column(self, dtype)
return self.set_categories(
new_categories=dtype.categories, ordered=bool(dtype.ordered)
)
def as_numerical_column(self, dtype: Dtype, **kwargs) -> NumericalColumn:
return self._get_decategorized_column().as_numerical_column(dtype)
def as_string_column(self, dtype, format=None, **kwargs) -> StringColumn:
return self._get_decategorized_column().as_string_column(
dtype, format=format
)
def as_datetime_column(self, dtype, **kwargs) -> DatetimeColumn:
return self._get_decategorized_column().as_datetime_column(
dtype, **kwargs
)
def as_timedelta_column(self, dtype, **kwargs) -> TimeDeltaColumn:
return self._get_decategorized_column().as_timedelta_column(
dtype, **kwargs
)
def _get_decategorized_column(self) -> ColumnBase:
if self.null_count == len(self):
# self.categories is empty; just return codes
return self.codes
gather_map = self.codes.astype(libcudf.types.size_type_dtype).fillna(0)
out = self.categories.take(gather_map)
out = out.set_mask(self.mask)
return out
def copy(self, deep: bool = True) -> Self:
result_col = super().copy(deep=deep)
if deep:
result_col.categories = libcudf.copying.copy_column(
self.dtype._categories
)
return result_col
@cached_property
def memory_usage(self) -> int:
return self.categories.memory_usage + self.codes.memory_usage
def _mimic_inplace(
self, other_col: ColumnBase, inplace: bool = False
) -> Optional[Self]:
out = super()._mimic_inplace(other_col, inplace=inplace)
if inplace and isinstance(other_col, CategoricalColumn):
self._codes = other_col._codes
return out
def view(self, dtype: Dtype) -> ColumnBase:
raise NotImplementedError(
"Categorical column views are not currently supported"
)
@staticmethod
def _concat(
objs: abc.MutableSequence[CategoricalColumn],
) -> CategoricalColumn:
# TODO: This function currently assumes it is being called from
# column.concat_columns, at least to the extent that all the
# preprocessing in that function has already been done. That should be
# improved as the concatenation API is solidified.
# Find the first non-null column:
head = next((obj for obj in objs if obj.valid_count), objs[0])
# Combine and de-dupe the categories
cats = column.concat_columns([o.categories for o in objs]).unique()
objs = [o._set_categories(cats, is_unique=True) for o in objs]
codes = [o.codes for o in objs]
newsize = sum(map(len, codes))
if newsize > libcudf.MAX_COLUMN_SIZE:
raise MemoryError(
f"Result of concat cannot have "
f"size > {libcudf.MAX_COLUMN_SIZE_STR}"
)
elif newsize == 0:
codes_col = column.column_empty(0, head.codes.dtype, masked=True)
else:
# Filter out inputs that have 0 length, then concatenate.
codes = [o for o in codes if len(o)]
codes_col = libcudf.concat.concat_columns(codes)
return column.build_categorical_column(
categories=column.as_column(cats),
codes=column.build_column(
codes_col.base_data, dtype=codes_col.dtype
),
mask=codes_col.base_mask,
size=codes_col.size,
offset=codes_col.offset,
)
def _with_type_metadata(
self: CategoricalColumn, dtype: Dtype
) -> CategoricalColumn:
if isinstance(dtype, CategoricalDtype):
return column.build_categorical_column(
categories=dtype.categories._values,
codes=column.build_column(
self.codes.base_data, dtype=self.codes.dtype
),
mask=self.codes.base_mask,
ordered=dtype.ordered,
size=self.codes.size,
offset=self.codes.offset,
null_count=self.codes.null_count,
)
return self
def set_categories(
self,
new_categories: Any,
ordered: bool = False,
rename: bool = False,
) -> CategoricalColumn:
# See CategoricalAccessor.set_categories.
ordered = ordered if ordered is not None else self.ordered
new_categories = column.as_column(new_categories)
if isinstance(new_categories, CategoricalColumn):
new_categories = new_categories.categories
# when called with rename=True, the pandas behavior is
# to replace the current category values with the new
# categories.
if rename:
# enforce same length
if len(new_categories) != len(self.categories):
raise ValueError(
"new_categories must have the same "
"number of items as old categories"
)
out_col = column.build_categorical_column(
categories=new_categories,
codes=self.base_children[0],
mask=self.base_mask,
size=self.size,
offset=self.offset,
ordered=ordered,
)
else:
out_col = self
if type(out_col.categories) is not type(new_categories):
                # If the current and new categories are of different column
                # types, return a column full of nulls.
out_col = _create_empty_categorical_column(
self,
CategoricalDtype(
categories=new_categories, ordered=ordered
),
)
elif (
not out_col._categories_equal(new_categories, ordered=True)
or not self.ordered == ordered
):
out_col = out_col._set_categories(
new_categories,
ordered=ordered,
)
return out_col
def _categories_equal(
self, new_categories: ColumnBase, ordered=False
) -> bool:
cur_categories = self.categories
if len(new_categories) != len(cur_categories):
return False
if new_categories.dtype != cur_categories.dtype:
return False
# if order doesn't matter, sort before the equals call below
if not ordered:
cur_categories = cudf.Series(cur_categories).sort_values(
ignore_index=True
)
new_categories = cudf.Series(new_categories).sort_values(
ignore_index=True
)
return cur_categories.equals(new_categories)
def _set_categories(
self,
new_categories: Any,
is_unique: bool = False,
ordered: bool = False,
) -> CategoricalColumn:
"""Returns a new CategoricalColumn with the categories set to the
specified *new_categories*.
Notes
-----
Assumes ``new_categories`` is the same dtype as the current categories
"""
cur_cats = column.as_column(self.categories)
new_cats = column.as_column(new_categories)
# Join the old and new categories to build a map from
# old to new codes, inserting na_sentinel for any old
# categories that don't exist in the new categories
# Ensure new_categories is unique first
if not (is_unique or new_cats.is_unique):
new_cats = cudf.Series(new_cats)._column.unique()
cur_codes = self.codes
max_cat_size = (
len(cur_cats) if len(cur_cats) > len(new_cats) else len(new_cats)
)
out_code_dtype = min_unsigned_type(max_cat_size)
cur_order = column.arange(len(cur_codes))
old_codes = column.arange(len(cur_cats), dtype=out_code_dtype)
new_codes = column.arange(len(new_cats), dtype=out_code_dtype)
new_df = cudf.DataFrame._from_data(
data={"new_codes": new_codes, "cats": new_cats}
)
old_df = cudf.DataFrame._from_data(
data={"old_codes": old_codes, "cats": cur_cats}
)
cur_df = cudf.DataFrame._from_data(
data={"old_codes": cur_codes, "order": cur_order}
)
# Join the old and new categories and line up their codes
df = old_df.merge(new_df, on="cats", how="left")
# Join the old and new codes to "recode" the codes data buffer
df = cur_df.merge(df, on="old_codes", how="left")
df = df.sort_values(by="order")
df.reset_index(drop=True, inplace=True)
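        # Worked example of the recode above (illustrative only): with current
        # categories ['a', 'b', 'c'], current codes [2, 0, 1] and new
        # categories ['b', 'c'], the left merges produce new codes
        # [1, <NA>, 0] -- 'a' has no match in the new categories, so its rows
        # become null.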
ordered = ordered if ordered is not None else self.ordered
new_codes = df._data["new_codes"]
# codes can't have masks, so take mask out before moving in
return column.build_categorical_column(
categories=new_cats,
codes=column.build_column(
new_codes.base_data, dtype=new_codes.dtype
),
mask=new_codes.base_mask,
size=new_codes.size,
offset=new_codes.offset,
ordered=ordered,
)
def reorder_categories(
self,
new_categories: Any,
ordered: bool = False,
) -> CategoricalColumn:
new_categories = column.as_column(new_categories)
# Compare new_categories against current categories.
# Ignore order for comparison because we're only interested
# in whether new_categories has all the same values as the
# current set of categories.
if not self._categories_equal(new_categories, ordered=False):
raise ValueError(
"items in new_categories are not the same as in "
"old categories"
)
return self._set_categories(new_categories, ordered=ordered)
def as_ordered(self):
out_col = self
if not out_col.ordered:
out_col = column.build_categorical_column(
categories=self.categories,
codes=self.codes,
mask=self.base_mask,
size=self.base_size,
offset=self.offset,
ordered=True,
)
return out_col
def as_unordered(self):
out_col = self
if out_col.ordered:
out_col = column.build_categorical_column(
categories=self.categories,
codes=self.codes,
mask=self.base_mask,
size=self.base_size,
offset=self.offset,
ordered=False,
)
return out_col
def _create_empty_categorical_column(
categorical_column: CategoricalColumn, dtype: "CategoricalDtype"
) -> CategoricalColumn:
return column.build_categorical_column(
categories=column.as_column(dtype.categories),
codes=column.as_column(
column.full(
categorical_column.size,
_DEFAULT_CATEGORICAL_VALUE,
categorical_column.codes.dtype,
)
),
offset=categorical_column.offset,
size=categorical_column.size,
mask=categorical_column.base_mask,
ordered=dtype.ordered,
)
def pandas_categorical_as_column(
categorical: ColumnLike, codes: Optional[ColumnLike] = None
) -> CategoricalColumn:
"""Creates a CategoricalColumn from a pandas.Categorical
If ``codes`` is defined, use it instead of ``categorical.codes``
"""
codes = categorical.codes if codes is None else codes
codes = column.as_column(codes)
valid_codes = codes != codes.dtype.type(_DEFAULT_CATEGORICAL_VALUE)
mask = None
if not valid_codes.all():
mask = bools_to_mask(valid_codes)
return column.build_categorical_column(
categories=categorical.categories,
codes=column.build_column(codes.base_data, codes.dtype),
size=codes.size,
mask=mask,
ordered=categorical.ordered,
)
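# A minimal usage sketch (illustrative, assuming pandas is imported as pd):
#   >>> pd_cat = pd.Categorical(["a", "b", None, "a"], categories=["a", "b"])
#   >>> col = pandas_categorical_as_column(pd_cat)
# Rows whose pandas code equals the sentinel (_DEFAULT_CATEGORICAL_VALUE)
# become nulls in the resulting CategoricalColumn.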
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/string.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
from __future__ import annotations
import re
import warnings
from functools import cached_property
from typing import (
TYPE_CHECKING,
Any,
Optional,
Sequence,
Tuple,
Union,
cast,
overload,
)
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
from numba import cuda
import cudf
import cudf.api.types
from cudf import _lib as libcudf
from cudf._lib import string_casting as str_cast, strings as libstrings
from cudf._lib.column import Column
from cudf._lib.types import size_type_dtype
from cudf.api.types import (
is_integer,
is_list_dtype,
is_scalar,
is_string_dtype,
)
from cudf.core.buffer import Buffer
from cudf.core.column import column, datetime
from cudf.core.column.column import ColumnBase
from cudf.core.column.methods import ColumnMethods
from cudf.utils.docutils import copy_docstring
from cudf.utils.dtypes import can_convert_to_column
def str_to_boolean(column: StringColumn):
"""Takes in string column and returns boolean column"""
return (
libstrings.count_characters(column) > cudf.Scalar(0, dtype="int8")
).fillna(False)
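# In ``str_to_boolean`` above, non-empty strings map to True, empty strings to
# False, and nulls are filled with False by the trailing ``fillna`` call.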
if TYPE_CHECKING:
from cudf._typing import (
ColumnBinaryOperand,
ColumnLike,
Dtype,
ScalarLike,
SeriesOrIndex,
)
_str_to_numeric_typecast_functions = {
cudf.api.types.dtype("int8"): str_cast.stoi8,
cudf.api.types.dtype("int16"): str_cast.stoi16,
cudf.api.types.dtype("int32"): str_cast.stoi,
cudf.api.types.dtype("int64"): str_cast.stol,
cudf.api.types.dtype("uint8"): str_cast.stoui8,
cudf.api.types.dtype("uint16"): str_cast.stoui16,
cudf.api.types.dtype("uint32"): str_cast.stoui,
cudf.api.types.dtype("uint64"): str_cast.stoul,
cudf.api.types.dtype("float32"): str_cast.stof,
cudf.api.types.dtype("float64"): str_cast.stod,
cudf.api.types.dtype("bool"): str_to_boolean,
}
_numeric_to_str_typecast_functions = {
cudf.api.types.dtype("int8"): str_cast.i8tos,
cudf.api.types.dtype("int16"): str_cast.i16tos,
cudf.api.types.dtype("int32"): str_cast.itos,
cudf.api.types.dtype("int64"): str_cast.ltos,
cudf.api.types.dtype("uint8"): str_cast.ui8tos,
cudf.api.types.dtype("uint16"): str_cast.ui16tos,
cudf.api.types.dtype("uint32"): str_cast.uitos,
cudf.api.types.dtype("uint64"): str_cast.ultos,
cudf.api.types.dtype("float32"): str_cast.ftos,
cudf.api.types.dtype("float64"): str_cast.dtos,
cudf.api.types.dtype("bool"): str_cast.from_booleans,
}
_datetime_to_str_typecast_functions = {
# TODO: support Date32 UNIX days
# cudf.api.types.dtype("datetime64[D]"): str_cast.int2timestamp,
cudf.api.types.dtype("datetime64[s]"): str_cast.int2timestamp,
cudf.api.types.dtype("datetime64[ms]"): str_cast.int2timestamp,
cudf.api.types.dtype("datetime64[us]"): str_cast.int2timestamp,
cudf.api.types.dtype("datetime64[ns]"): str_cast.int2timestamp,
}
_timedelta_to_str_typecast_functions = {
cudf.api.types.dtype("timedelta64[s]"): str_cast.int2timedelta,
cudf.api.types.dtype("timedelta64[ms]"): str_cast.int2timedelta,
cudf.api.types.dtype("timedelta64[us]"): str_cast.int2timedelta,
cudf.api.types.dtype("timedelta64[ns]"): str_cast.int2timedelta,
}
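# These lookup tables dispatch string <-> numeric/datetime/timedelta casts to
# the corresponding casting routine, keyed by the non-string dtype involved in
# the conversion.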
def _is_supported_regex_flags(flags):
return flags == 0 or (
(flags & (re.MULTILINE | re.DOTALL) != 0)
and (flags & ~(re.MULTILINE | re.DOTALL) == 0)
)
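# Only re.MULTILINE and re.DOTALL (alone or combined) are accepted; e.g.
# _is_supported_regex_flags(re.MULTILINE | re.DOTALL) is True, while
# _is_supported_regex_flags(re.IGNORECASE) is False.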
class StringMethods(ColumnMethods):
"""
Vectorized string functions for Series and Index.
    This mimics the pandas ``df.str`` interface. Nulls stay null
unless handled otherwise by a particular method.
Patterned after Python's string methods, with some
inspiration from R's stringr package.
"""
_column: StringColumn
def __init__(self, parent):
value_type = (
parent.dtype.leaf_type
if is_list_dtype(parent.dtype)
else parent.dtype
)
if not is_string_dtype(value_type):
raise AttributeError(
"Can only use .str accessor with string values"
)
super().__init__(parent=parent)
def htoi(self) -> SeriesOrIndex:
"""
Returns integer value represented by each hex string.
String is interpreted to have hex (base-16) characters.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1234", "ABCDEF", "1A2", "cafe"])
>>> s.str.htoi()
0 4660
1 11259375
2 418
3 51966
dtype: int64
"""
out = str_cast.htoi(self._column)
return self._return_or_inplace(out, inplace=False)
hex_to_int = htoi
def ip2int(self) -> SeriesOrIndex:
"""
This converts ip strings to integers
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["12.168.1.1", "10.0.0.1"])
>>> s.str.ip2int()
0 212336897
1 167772161
dtype: int64
        Returns 0 for any string that is not an IP address.
>>> s = cudf.Series(["12.168.1.1", "10.0.0.1", "abc"])
>>> s.str.ip2int()
0 212336897
1 167772161
2 0
dtype: int64
"""
out = str_cast.ip2int(self._column)
return self._return_or_inplace(out, inplace=False)
ip_to_int = ip2int
def __getitem__(self, key):
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def len(self) -> SeriesOrIndex:
r"""
Computes the length of each element in the Series/Index.
Returns
-------
Series or Index of int
A Series or Index of integer values
indicating the length of each element in the Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["dog", "", "\n", None])
>>> s.str.len()
0 3
1 0
2 1
3 <NA>
dtype: int32
"""
return self._return_or_inplace(
libstrings.count_characters(self._column)
)
def byte_count(self) -> SeriesOrIndex:
"""
Computes the number of bytes of each string in the Series/Index.
Returns
-------
Series or Index of int
A Series or Index of integer values
indicating the number of bytes of each strings in the
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["abc","d","ef"])
>>> s.str.byte_count()
0 3
1 1
2 2
dtype: int32
>>> s = cudf.Series(["Hello", "Bye", "Thanks 😊"])
>>> s.str.byte_count()
0 5
1 3
2 11
dtype: int32
"""
return self._return_or_inplace(
libstrings.count_bytes(self._column),
)
@overload
def cat(
self, sep: Optional[str] = None, na_rep: Optional[str] = None
) -> str:
...
@overload
def cat(
self, others, sep: Optional[str] = None, na_rep: Optional[str] = None
) -> Union[SeriesOrIndex, "cudf.core.column.string.StringColumn"]:
...
def cat(self, others=None, sep=None, na_rep=None):
"""
Concatenate strings in the Series/Index with given separator.
If ``others`` is specified, this function concatenates the Series/Index
and elements of others element-wise. If others is not passed, then all
values in the Series/Index are concatenated into a single string with
a given sep.
Parameters
----------
others : Series or List of str
Strings to be appended.
The number of strings must match ``size()`` of this instance.
This must be either a Series of string dtype or a Python
list of strings.
sep : str
If specified, this separator will be appended to each string
before appending the others.
na_rep : str
This character will take the place of any null strings
(not empty strings) in either list.
- If ``na_rep`` is ``None``, and ``others`` is ``None``,
missing values in the Series/Index are
omitted from the result.
- If ``na_rep`` is ``None``, and ``others`` is
not ``None``, a row containing a missing value
in any of the columns (before concatenation)
will have a missing value in the result.
Returns
-------
concat : str or Series/Index of str dtype
If ``others`` is ``None``, ``str`` is returned,
otherwise a ``Series/Index`` (same type as caller)
of str dtype is returned.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'b', None, 'd'])
>>> s.str.cat(sep=' ')
'a b d'
By default, NA values in the Series are ignored. Using na_rep, they
can be given a representation:
>>> s.str.cat(sep=' ', na_rep='?')
'a b ? d'
If others is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
0 a,A
1 b,B
2 <NA>
3 d,D
dtype: object
Missing values will remain missing in the result, but can again be
represented using na_rep
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
0 a,A
1 b,B
2 -,C
3 d,D
dtype: object
If sep is not specified, the values are concatenated without
separation.
>>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
0 aA
1 bB
2 -C
3 dD
dtype: object
"""
if sep is None:
sep = ""
if others is None:
data = libstrings.join(
self._column,
cudf.Scalar(sep),
cudf.Scalar(na_rep, "str"),
)
else:
other_cols = _get_cols_list(self._parent, others)
all_cols = [self._column] + other_cols
data = libstrings.concatenate(
all_cols,
cudf.Scalar(sep),
cudf.Scalar(na_rep, "str"),
)
if len(data) == 1 and data.null_count == 1:
data = [""]
# We only want to keep the index if we are adding something to each
# row, not if we are joining all the rows into a single string.
out = self._return_or_inplace(data, retain_index=others is not None)
if len(out) == 1 and others is None:
if isinstance(out, cudf.Series):
out = out.iloc[0]
else:
out = out[0]
return out
def join(
self, sep=None, string_na_rep=None, sep_na_rep=None
) -> SeriesOrIndex:
"""
Join lists contained as elements in the Series/Index with passed
delimiter.
If the elements of a Series are lists themselves, join the content of
these lists using the delimiter passed to the function.
This function is an equivalent to :meth:`str.join`.
In the special case that the lists in the Series contain only ``None``,
a `<NA>`/`None` value will always be returned.
Parameters
----------
sep : str or array-like
If str, the delimiter is used between list entries.
If array-like, the string at a position is used as a
            delimiter for the corresponding row of the list entries.
string_na_rep : str, default None
This character will take the place of null strings
(not empty strings) in the Series but will be considered
only if the Series contains list elements and those lists have
at least one non-null string. If ``string_na_rep`` is ``None``,
            it defaults to an empty string ``""``.
sep_na_rep : str, default None
This character will take the place of any null strings
(not empty strings) in `sep`. This parameter can be used
only if `sep` is array-like. If ``sep_na_rep`` is ``None``,
            it defaults to an empty string ``""``.
Returns
-------
Series/Index: object
The list entries concatenated by intervening occurrences of
the delimiter.
Raises
------
ValueError
- If ``sep_na_rep`` is supplied when ``sep`` is str.
- If ``sep`` is array-like and not of equal length with Series/Index.
TypeError
- If ``string_na_rep`` or ``sep_na_rep`` are not scalar values.
- If ``sep`` is not of following types: str or array-like.
Examples
--------
>>> import cudf
>>> ser = cudf.Series([['a', 'b', 'c'], ['d', 'e'], ['f'], ['g', ' ', 'h']])
>>> ser
0 [a, b, c]
1 [d, e]
2 [f]
3 [g, , h]
dtype: list
>>> ser.str.join(sep='-')
0 a-b-c
1 d-e
2 f
3 g- -h
dtype: object
        ``sep`` can also be an array-like input:
>>> ser.str.join(sep=['-', '+', '.', '='])
0 a-b-c
1 d+e
2 f
3 g= =h
dtype: object
If the actual series doesn't have lists, each character is joined
by `sep`:
>>> ser = cudf.Series(['abc', 'def', 'ghi'])
>>> ser
0 abc
1 def
2 ghi
dtype: object
>>> ser.str.join(sep='_')
0 a_b_c
1 d_e_f
2 g_h_i
dtype: object
We can replace `<NA>`/`None` values present in lists using
``string_na_rep`` if the lists contain at least one valid string
(lists containing all `None` will result in a `<NA>`/`None` value):
>>> ser = cudf.Series([['a', 'b', None], [None, None, None], None, ['c', 'd']])
>>> ser
0 [a, b, None]
1 [None, None, None]
2 None
3 [c, d]
dtype: list
>>> ser.str.join(sep='_', string_na_rep='k')
0 a_b_k
1 <NA>
2 <NA>
3 c_d
dtype: object
We can replace `<NA>`/`None` values present in lists of ``sep``
using ``sep_na_rep``:
>>> ser.str.join(sep=[None, '^', '.', '-'], sep_na_rep='+')
0 a+b+
1 <NA>
2 <NA>
3 c-d
dtype: object
""" # noqa E501
if sep is None:
sep = ""
if string_na_rep is None:
string_na_rep = ""
if is_scalar(sep) and sep_na_rep:
raise ValueError(
"sep_na_rep cannot be defined when `sep` is scalar."
)
if sep_na_rep is None:
sep_na_rep = ""
if not is_scalar(string_na_rep):
raise TypeError(
f"string_na_rep should be a string scalar, got {string_na_rep}"
f" of type : {type(string_na_rep)}"
)
if isinstance(self._column, cudf.core.column.ListColumn):
strings_column = self._column
else:
# If self._column is not a ListColumn, we will have to
# split each row by character and create a ListColumn out of it.
strings_column = self._split_by_character()
if is_scalar(sep):
data = libstrings.join_lists_with_scalar(
strings_column, cudf.Scalar(sep), cudf.Scalar(string_na_rep)
)
elif can_convert_to_column(sep):
sep_column = column.as_column(sep)
if len(sep_column) != len(strings_column):
raise ValueError(
f"sep should be of similar size to the series, "
f"got: {len(sep_column)}, expected: {len(strings_column)}"
)
if not is_scalar(sep_na_rep):
raise TypeError(
f"sep_na_rep should be a string scalar, got {sep_na_rep} "
f"of type: {type(sep_na_rep)}"
)
data = libstrings.join_lists_with_column(
strings_column,
sep_column,
cudf.Scalar(string_na_rep),
cudf.Scalar(sep_na_rep),
)
else:
raise TypeError(
f"sep should be an str, array-like or Series object, "
f"found {type(sep)}"
)
return self._return_or_inplace(data)
def _split_by_character(self):
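        # Tokenize each string into its individual characters and rebuild a
        # list column from them, reusing the parent string column's offsets,
        # so e.g. "abc" becomes the list entry ["a", "b", "c"].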
result_col = libstrings.character_tokenize(self._column)
offset_col = self._column.children[0]
return cudf.core.column.ListColumn(
size=len(self._column),
dtype=cudf.ListDtype(self._column.dtype),
mask=self._column.mask,
offset=0,
null_count=self._column.null_count,
children=(offset_col, result_col),
)
def extract(
self, pat: str, flags: int = 0, expand: bool = True
) -> SeriesOrIndex:
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the first
match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags to pass through to the regex engine (e.g. re.MULTILINE)
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group or
DataFrame if there are multiple capture groups.
Returns
-------
DataFrame or Series/Index
A DataFrame with one row for each subject string, and one column
for each group. If `expand=False` and `pat` has only one capture
group, then return a Series/Index.
Notes
-----
The `flags` parameter currently only supports re.DOTALL and
re.MULTILINE.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 <NA> <NA>
A pattern with one group will return a DataFrame with one
column if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 <NA>
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 <NA>
dtype: object
""" # noqa W605
if not _is_supported_regex_flags(flags):
raise NotImplementedError(
"unsupported value for `flags` parameter"
)
data, _ = libstrings.extract(self._column, pat, flags)
if len(data) == 1 and expand is False:
data = next(iter(data.values()))
else:
data = data
return self._return_or_inplace(data, expand=expand)
def contains(
self,
pat: Union[str, Sequence],
case: bool = True,
flags: int = 0,
na=np.nan,
regex: bool = True,
) -> SeriesOrIndex:
r"""
Test if pattern or regex is contained within a string of a Series or
Index.
Return boolean Series or Index based on whether a given pattern or
regex is contained within a string of a Series or Index.
Parameters
----------
pat : str or list-like
Character sequence or regular expression.
If ``pat`` is list-like then regular expressions are not
accepted.
flags : int, default 0 (no flags)
Flags to pass through to the regex engine (e.g. re.MULTILINE)
regex : bool, default True
If True, assumes the pattern is a regular expression.
If False, treats the pattern as a literal string.
Returns
-------
Series/Index of bool dtype
A Series/Index of boolean dtype indicating whether the given
pattern is contained within the string of each element of the
Series/Index.
Notes
-----
The parameters `case` and `na` are not yet supported and will
raise a NotImplementedError if anything other than the default
value is set.
The `flags` parameter currently only supports re.DOTALL and
re.MULTILINE.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['Mouse', 'dog', 'house and parrot', '23', None])
>>> s1
0 Mouse
1 dog
2 house and parrot
3 23
4 <NA>
dtype: object
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 <NA>
dtype: bool
Returning an Index of booleans using only a literal pattern.
>>> data = ['Mouse', 'dog', 'house and parrot', '23.0', np.NaN]
>>> idx = cudf.Index(data)
>>> idx
StringIndex(['Mouse' 'dog' 'house and parrot' '23.0' None], dtype='object')
>>> idx.str.contains('23', regex=False)
GenericIndex([False, False, False, True, <NA>], dtype='bool')
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 <NA>
dtype: bool
Returning any digit using regular expression.
>>> s1.str.contains('\d', regex=True)
0 False
1 False
2 False
3 True
4 <NA>
dtype: bool
        Ensure ``pat`` is not a literal pattern when ``regex`` is set
to True. Note in the following example one might expect
only `s2[1]` and `s2[3]` to return True. However,
'.0' as a regex matches any character followed by a 0.
>>> s2 = cudf.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
The ``pat`` may also be a sequence of strings in which case
the individual strings are searched in corresponding rows.
>>> s2 = cudf.Series(['house', 'dog', 'and', '', ''])
>>> s1.str.contains(s2)
0 False
1 True
2 True
3 True
4 <NA>
dtype: bool
""" # noqa W605
if na is not np.nan:
raise NotImplementedError("`na` parameter is not yet supported")
if regex and isinstance(pat, re.Pattern):
flags = pat.flags & ~re.U
pat = pat.pattern
if not _is_supported_regex_flags(flags):
raise NotImplementedError(
"unsupported value for `flags` parameter"
)
if regex and not case:
raise NotImplementedError(
"`case=False` only supported when `regex=False`"
)
if is_scalar(pat):
if regex:
result_col = libstrings.contains_re(self._column, pat, flags)
else:
if case is False:
input_column = libstrings.to_lower(self._column)
pat = cudf.Scalar(pat.lower(), dtype="str") # type: ignore
else:
input_column = self._column
pat = cudf.Scalar(pat, dtype="str") # type: ignore
result_col = libstrings.contains(input_column, pat)
else:
# TODO: we silently ignore the `regex=` flag here
if case is False:
input_column = libstrings.to_lower(self._column)
pat = libstrings.to_lower(column.as_column(pat, dtype="str"))
else:
input_column = self._column
pat = column.as_column(pat, dtype="str")
result_col = libstrings.contains_multiple(input_column, pat)
return self._return_or_inplace(result_col)
def like(self, pat: str, esc: Optional[str] = None) -> SeriesOrIndex:
"""
Test if a like pattern matches a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern
matches strings in a Series or Index.
Parameters
----------
pat : str
Pattern for matching. Use '%' for any number of any character
including no characters. Use '_' for any single character.
esc : str
Character to use if escape is necessary to match '%' or '_'
literals.
Returns
-------
Series/Index of bool dtype
A Series/Index of boolean dtype indicating whether the given
pattern matches the string of each element of the Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a', 'b' ,'ddbc', '%bb'])
>>> s.str.like('%b_')
0 False
1 False
2 False
3 True
4 True
dtype: boolean
Parameter `esc` can be used to match a wildcard literal.
>>> s.str.like('/%b_', esc='/' )
0 False
1 False
2 False
3 False
4 True
dtype: boolean
"""
if not isinstance(pat, str):
raise TypeError(
f"expected a string object, not {type(pat).__name__}"
)
if esc is None:
esc = ""
if not isinstance(esc, str):
raise TypeError(
f"expected a string object, not {type(esc).__name__}"
)
if len(esc) > 1:
raise ValueError(
"expected esc to contain less than or equal to 1 characters"
)
result_col = libstrings.like(
self._column, cudf.Scalar(pat, "str"), cudf.Scalar(esc, "str")
)
return self._return_or_inplace(result_col)
def repeat(
self,
repeats: Union[int, Sequence],
) -> SeriesOrIndex:
"""
Duplicate each string in the Series or Index.
Equivalent to `str.repeat()
<https://pandas.pydata.org/docs/reference/api/pandas.Series.str.repeat.html>`_.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = cudf.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
"""
if can_convert_to_column(repeats):
return self._return_or_inplace(
libstrings.repeat_sequence(
self._column,
column.as_column(repeats, dtype="int"),
),
)
return self._return_or_inplace(
libstrings.repeat_scalar(self._column, repeats)
)
def replace(
self,
pat: Union[str, Sequence],
repl: Union[str, Sequence],
n: int = -1,
case=None,
flags: int = 0,
regex: bool = True,
) -> SeriesOrIndex:
"""
Replace occurrences of pattern/regex in the Series/Index with some
other string. Equivalent to `str.replace()
<https://docs.python.org/3/library/stdtypes.html#str.replace>`_
or `re.sub()
<https://docs.python.org/3/library/re.html#re.sub>`_.
Parameters
----------
pat : str or list-like
String(s) to be replaced as a character sequence or regular
expression.
repl : str or list-like
String(s) to be used as replacement.
n : int, default -1 (all)
Number of replacements to make from the start.
regex : bool, default True
If True, assumes the pattern is a regular expression.
If False, treats the pattern as a literal string.
Returns
-------
Series/Index of str dtype
A copy of the object with all matching occurrences of pat replaced
by repl.
Notes
-----
The parameters `case` and `flags` are not yet supported and will raise
a `NotImplementedError` if anything other than the default value
is set.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['foo', 'fuz', None])
>>> s
0 foo
1 fuz
2 <NA>
dtype: object
When pat is a string and regex is True (the default), the given pat
is compiled as a regex. When repl is a string, it replaces matching
regex patterns as with ``re.sub()``. NaN value(s) in the Series
are left as is:
>>> s.str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 <NA>
dtype: object
When pat is a string and `regex` is False, every pat is replaced
with repl as with ``str.replace()``:
>>> s.str.replace('f.', 'ba', regex=False)
0 foo
1 fuz
2 <NA>
dtype: object
"""
if case is not None:
raise NotImplementedError("`case` parameter is not yet supported")
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
if can_convert_to_column(pat) and can_convert_to_column(repl):
if n != -1:
warnings.warn(
"`n` parameter is not supported when "
"`pat` and `repl` are list-like inputs"
)
return self._return_or_inplace(
libstrings.replace_multi_re(
self._column,
pat,
column.as_column(repl, dtype="str"),
)
if regex
else libstrings.replace_multi(
self._column,
column.as_column(pat, dtype="str"),
column.as_column(repl, dtype="str"),
),
)
# Pandas treats 0 as all
if n == 0:
n = -1
# If 'pat' is re.Pattern then get the pattern string from it
if regex and isinstance(pat, re.Pattern):
pat = pat.pattern
        # Pandas forces a non-regex replace when pat is a single character
return self._return_or_inplace(
libstrings.replace_re(
self._column, pat, cudf.Scalar(repl, "str"), n
)
if regex is True and len(pat) > 1
else libstrings.replace(
self._column,
cudf.Scalar(pat, "str"),
cudf.Scalar(repl, "str"),
n,
),
)
def replace_with_backrefs(self, pat: str, repl: str) -> SeriesOrIndex:
r"""
Use the ``repl`` back-ref template to create a new string
with the extracted elements found using the ``pat`` expression.
Parameters
----------
        pat : str or compiled regex
            Regex with groupings to identify extract sections.
            If a compiled regex is passed, its pattern string is used.
repl : str
String template containing back-reference indicators.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["A543","Z756"])
>>> s.str.replace_with_backrefs('(\\d)(\\d)', 'V\\2\\1')
0 AV453
1 ZV576
dtype: object
"""
# If 'pat' is re.Pattern then get the pattern string from it
if isinstance(pat, re.Pattern):
pat = pat.pattern
return self._return_or_inplace(
libstrings.replace_with_backrefs(self._column, pat, repl)
)
def slice(
self,
start: Optional[int] = None,
stop: Optional[int] = None,
step: Optional[int] = None,
) -> SeriesOrIndex:
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series/Index of str dtype
Series or Index from sliced substring from
original string object.
See Also
--------
slice_replace
Replace a slice with a string.
get
Return element at position. Equivalent
to ``Series.str.slice(start=i, stop=i+1)``
with ``i`` being the position.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(start=-1)
0 a
1 x
2 n
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
"""
return self._return_or_inplace(
libstrings.slice_strings(self._column, start, stop, step),
)
def isinteger(self) -> SeriesOrIndex:
"""
Check whether all characters in each string form integer.
If a string has zero characters, False is returned for
that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1", "0.1", "+100", "-15", "abc"])
>>> s.str.isinteger()
0 True
1 False
2 True
3 True
4 False
dtype: bool
>>> s = cudf.Series(["this is plan text", "", "10 10"])
>>> s.str.isinteger()
0 False
1 False
2 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_integer(self._column))
def ishex(self) -> SeriesOrIndex:
"""
Check whether all characters in each string form a hex integer.
If a string has zero characters, False is returned for
that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See Also
--------
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["", "123DEF", "0x2D3", "-15", "abc"])
>>> s.str.ishex()
0 False
1 True
2 True
3 False
4 True
dtype: bool
"""
return self._return_or_inplace(str_cast.is_hex(self._column))
def istimestamp(self, format: str) -> SeriesOrIndex:
"""
Check whether all characters in each string can be converted to
a timestamp using the given format.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["20201101", "192011", "18200111", "2120-11-01"])
>>> s.str.istimestamp("%Y%m%d")
0 True
1 False
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(
str_cast.istimestamp(self._column, format)
)
def isfloat(self) -> SeriesOrIndex:
r"""
Check whether all characters in each string form floating value.
If a string has zero characters, False is returned for
that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1.1", "0.123213", "+0.123", "-100.0001", "234",
... "3-"])
>>> s.str.isfloat()
0 True
1 True
2 True
3 True
4 True
5 False
dtype: bool
>>> s = cudf.Series(["this is plain text", "\t\n", "9.9", "9.9.9"])
>>> s.str.isfloat()
0 False
1 False
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_float(self._column))
def isdecimal(self) -> SeriesOrIndex:
"""
Check whether all characters in each string are decimal.
This is equivalent to running the Python string method
`str.isdecimal()
<https://docs.python.org/3/library/stdtypes.html#str.isdecimal>`_
for each element of the Series/Index.
If a string has zero characters, False is returned for
that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s3 = cudf.Series(['23', '³', '⅕', ''])
The s3.str.isdecimal method checks for characters used to form
numbers in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_decimal(self._column))
def isalnum(self) -> SeriesOrIndex:
"""
Check whether all characters in each string are alphanumeric.
This is equivalent to running the Python string method
`str.isalnum()
<https://docs.python.org/3/library/stdtypes.html#str.isalnum>`_
for each element of the Series/Index. If a string has zero
characters, False is returned for that check.
Equivalent to: ``isalpha() or isdigit() or isnumeric() or isdecimal()``
Returns
-------
Series or Index of bool
Series or Index of boolean values with the
same length as the original Series/Index.
See Also
--------
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with
any additional punctuation or whitespace will
evaluate to false for an alphanumeric check.
>>> s2 = cudf.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_alnum(self._column))
def isalpha(self) -> SeriesOrIndex:
"""
Check whether all characters in each string are alphabetic.
This is equivalent to running the Python string method
`str.isalpha()
<https://docs.python.org/3/library/stdtypes.html#str.isalpha>`_
for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length
as the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_alpha(self._column))
def isdigit(self) -> SeriesOrIndex:
"""
Check whether all characters in each string are digits.
This is equivalent to running the Python string method
`str.isdigit()
<https://docs.python.org/3/library/stdtypes.html#str.isdigit>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['23', '³', '⅕', ''])
The ``s.str.isdigit`` method is the same as ``s.str.isdecimal`` but
also includes special digits, like superscripted and
subscripted digits in unicode.
>>> s.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_digit(self._column))
def isnumeric(self) -> SeriesOrIndex:
"""
Check whether all characters in each string are numeric.
This is equivalent to running the Python string method
`str.isnumeric()
<https://docs.python.org/3/library/stdtypes.html#str.isnumeric>`_
for each element of the Series/Index. If a
string has zero characters, False is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
        The ``str.isnumeric`` method is the same as ``str.isdigit`` but
        also includes other characters that can represent
        quantities such as unicode fractions.
        >>> s2 = cudf.Series(['23', '³', '⅕', ''], dtype='str')
>>> s2.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_numeric(self._column))
def isupper(self) -> SeriesOrIndex:
"""
Check whether all characters in each string are uppercase.
This is equivalent to running the Python string method
`str.isupper()
<https://docs.python.org/3/library/stdtypes.html#str.isupper>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_upper(self._column))
def islower(self) -> SeriesOrIndex:
"""
Check whether all characters in each string are lowercase.
This is equivalent to running the Python string method
`str.islower()
<https://docs.python.org/3/library/stdtypes.html#str.islower>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_lower(self._column))
def isipv4(self) -> SeriesOrIndex:
"""
Check whether all characters in each string form an IPv4 address.
If a string has zero characters, False is returned for
that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["", "127.0.0.1", "255.255.255.255", "123.456"])
>>> s.str.isipv4()
0 False
1 True
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(str_cast.is_ipv4(self._column))
def lower(self) -> SeriesOrIndex:
"""
Converts all characters to lowercase.
Equivalent to `str.lower()
<https://docs.python.org/3/library/stdtypes.html#str.lower>`_.
Returns
-------
Series or Index of object
A copy of the object with all strings converted to lowercase.
See Also
--------
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and remaining
to lowercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
"""
return self._return_or_inplace(libstrings.to_lower(self._column))
def upper(self) -> SeriesOrIndex:
"""
Convert each string to uppercase.
This only applies to ASCII characters at this time.
Equivalent to `str.upper()
<https://docs.python.org/3/library/stdtypes.html#str.upper>`_.
Returns
-------
Series or Index of object
See Also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and
remaining to lowercase.
capitalize
Converts first character to uppercase and remaining to
lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
"""
return self._return_or_inplace(libstrings.to_upper(self._column))
def capitalize(self) -> SeriesOrIndex:
"""
Convert strings in the Series/Index to be capitalized.
This only applies to ASCII characters at this time.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s = cudf.Series(["hello, friend","goodbye, friend"])
>>> s.str.capitalize()
0 Hello, friend
1 Goodbye, friend
dtype: object
"""
return self._return_or_inplace(libstrings.capitalize(self._column))
def swapcase(self) -> SeriesOrIndex:
"""
Change each lowercase character to uppercase and vice versa.
This only applies to ASCII characters at this time.
Equivalent to `str.swapcase()
<https://docs.python.org/3/library/stdtypes.html#str.swapcase>`_.
Returns
-------
Series or Index of object
See Also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and remaining
to lowercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
return self._return_or_inplace(libstrings.swapcase(self._column))
def title(self) -> SeriesOrIndex:
"""
        Uppercase the first letter of each word
and lowercase the rest.
This only applies to ASCII characters at this time.
Equivalent to `str.title()
<https://docs.python.org/3/library/stdtypes.html#str.title>`_.
Returns
-------
Series or Index of object
See Also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
        >>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
"""
return self._return_or_inplace(libstrings.title(self._column))
def istitle(self) -> SeriesOrIndex:
"""
Check whether each string is title formatted.
The first letter of each word should be uppercase and the rest
should be lowercase.
Equivalent to :meth:`str.istitle`.
Returns
-------
        Series or Index of bool
Examples
--------
>>> import cudf
        >>> data = ['leopard', 'Golden Eagle', 'SNAKE', '']
>>> s = cudf.Series(data)
>>> s.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_title(self._column))
def filter_alphanum(
self, repl: Optional[str] = None, keep: bool = True
) -> SeriesOrIndex:
"""
Remove non-alphanumeric characters from strings in this column.
Parameters
----------
repl : str
Optional string to use in place of removed characters.
keep : bool
Set to False to remove all alphanumeric characters instead
of keeping them.
Returns
-------
Series/Index of str dtype
Strings with only alphanumeric characters.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["pears £12", "plums $34", "Temp 72℉", "100K℧"])
>>> s.str.filter_alphanum(" ")
0 pears 12
1 plums 34
2 Temp 72
3 100K
dtype: object
"""
if repl is None:
repl = ""
return self._return_or_inplace(
libstrings.filter_alphanum(
self._column, cudf.Scalar(repl, "str"), keep
),
)
def slice_from(
self, starts: "cudf.Series", stops: "cudf.Series"
) -> SeriesOrIndex:
"""
Return substring of each string using positions for each string.
The starts and stops parameters are of Column type.
Parameters
----------
starts : Series
            Beginning position of each string to extract.
            Default is the beginning of each string.
stops : Series
            Ending position of each string to extract.
            Default is the end of each string.
            Use -1 to specify the end of that string.
Returns
-------
Series/Index of str dtype
A substring of each string using positions for each string.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello","there"])
>>> s
0 hello
1 there
dtype: object
>>> starts = cudf.Series([1, 3])
>>> stops = cudf.Series([5, 5])
>>> s.str.slice_from(starts, stops)
0 ello
1 re
dtype: object
"""
return self._return_or_inplace(
libstrings.slice_from(
self._column,
column.as_column(starts),
column.as_column(stops),
),
)
def slice_replace(
self,
start: Optional[int] = None,
stop: Optional[int] = None,
repl: Optional[str] = None,
) -> SeriesOrIndex:
"""
Replace the specified section of each string with a new string.
Parameters
----------
start : int, optional
Beginning position of the string to replace.
            Default is the beginning of each string.
stop : int, optional
Ending position of the string to replace.
Default is end of each string.
repl : str, optional
String to insert into the specified position values.
Returns
-------
Series/Index of str dtype
A new string with the specified section of the string
replaced with `repl` string.
See Also
--------
slice
Just slicing without replacement.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the `end` of
the string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the `start` of the string to `stop`
is replaced with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start`
to `stop` is replaced with `repl`. Everything before or
after `start` and `stop` is included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if start is None:
start = 0
if stop is None:
stop = -1
if repl is None:
repl = ""
return self._return_or_inplace(
libstrings.slice_replace(
self._column, start, stop, cudf.Scalar(repl, "str")
),
)
def insert(
self, start: int = 0, repl: Optional[str] = None
) -> SeriesOrIndex:
"""
Insert the specified string into each string in the specified
position.
Parameters
----------
start : int
            Position at which ``repl`` is inserted into each string.
            Default is the beginning of each string.
Specify -1 to insert at the end of each string.
repl : str
String to insert into the specified position value.
Returns
-------
Series/Index of str dtype
A new string series with the specified string
inserted at the specified position.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["abcdefghij", "0123456789"])
>>> s.str.insert(2, '_')
0 ab_cdefghij
1 01_23456789
dtype: object
When no `repl` is passed, nothing is inserted.
>>> s.str.insert(2)
0 abcdefghij
1 0123456789
dtype: object
Negative values are also supported for `start`.
>>> s.str.insert(-1,'_')
0 abcdefghij_
1 0123456789_
dtype: object
"""
if repl is None:
repl = ""
return self._return_or_inplace(
libstrings.insert(self._column, start, cudf.Scalar(repl, "str")),
)
def get(self, i: int = 0) -> SeriesOrIndex:
"""
Extract element from each component at specified position.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids", "cudf"])
>>> s
0 hello world
1 rapids
2 cudf
dtype: object
>>> s.str.get(10)
0 d
1
2
dtype: object
>>> s.str.get(1)
0 e
1 a
2 u
dtype: object
``get`` also accepts negative index number.
>>> s.str.get(-1)
0 d
1 s
2 f
dtype: object
"""
return self._return_or_inplace(libstrings.get(self._column, i))
def get_json_object(
self,
json_path,
*,
allow_single_quotes=False,
strip_quotes_from_single_strings=True,
missing_fields_as_nulls=False,
):
r"""
Applies a JSONPath string to an input strings column
where each row in the column is a valid json string
Parameters
----------
json_path : str
The JSONPath string to be applied to each row
of the input column
allow_single_quotes : bool, default False
If True, representing strings with single
quotes is allowed.
If False, strings must only be represented
with double quotes.
strip_quotes_from_single_strings : bool, default True
If True, strip the quotes from the return value of
a given row if it is a string.
If False, values returned for a given row include
quotes if they are strings.
missing_fields_as_nulls : bool, default False
If True, when an object is queried for a field
it does not contain, "null" is returned.
If False, when an object is queried for a field
it does not contain, None is returned.
Returns
-------
Column: New strings column containing the retrieved json object strings
Examples
--------
>>> import cudf
>>> s = cudf.Series(
[
\"\"\"
{
"store":{
"book":[
{
"category":"reference",
"author":"Nigel Rees",
"title":"Sayings of the Century",
"price":8.95
},
{
"category":"fiction",
"author":"Evelyn Waugh",
"title":"Sword of Honour",
"price":12.99
}
]
}
}
\"\"\"
])
>>> s
0 {"store": {\n "book": [\n { "cat...
dtype: object
>>> s.str.get_json_object("$.store.book")
0 [\n { "category": "reference",\n ...
dtype: object
"""
options = libstrings.GetJsonObjectOptions(
allow_single_quotes=allow_single_quotes,
strip_quotes_from_single_strings=(
strip_quotes_from_single_strings
),
missing_fields_as_nulls=missing_fields_as_nulls,
)
return self._return_or_inplace(
libstrings.get_json_object(
self._column, cudf.Scalar(json_path, "str"), options
)
)
def split(
self,
pat: Optional[str] = None,
n: int = -1,
expand: bool = False,
regex: Optional[bool] = None,
) -> SeriesOrIndex:
"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the beginning, at the
specified delimiter string. Similar to `str.split()
<https://docs.python.org/3/library/stdtypes.html#str.split>`_.
Parameters
----------
pat : str, default None
String or regular expression to split on. If not specified, split
on whitespace.
n : int, default -1 (all)
Limit number of splits in output. `None`, 0, and -1 will all be
interpreted as "all splits".
expand : bool, default False
Expand the split strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding
dimensionality.
* If ``False``, return Series/Index, containing lists
of strings.
regex : bool, default None
Determines if the passed-in pattern is a regular expression:
* If ``True``, assumes the passed-in pattern is a regular
expression
* If ``False``, treats the pattern as a literal string.
* If pat length is 1, treats pat as a literal string.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
rsplit
Splits string around given separator/delimiter, starting from
the right.
str.split
Standard library version for split.
str.rsplit
Standard library version for rsplit.
Notes
-----
The handling of the n keyword depends on the number
of found splits:
- If found splits > n, make first n splits only
- If found splits <= n, make all splits
- If for a certain row the number of found
splits < n, append None for padding up to n
if ``expand=True``.
If using ``expand=True``, Series and Index callers return
DataFrame and MultiIndex objects, respectively.
Examples
--------
>>> import cudf
>>> data = ["this is a regular sentence",
... "https://docs.python.org/index.html", None]
>>> s = cudf.Series(data)
>>> s
0 this is a regular sentence
1 https://docs.python.org/index.html
2 <NA>
dtype: object
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/index.html]
2 None
dtype: list
Without the ``n`` parameter, the outputs of ``rsplit``
and ``split`` are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/index.html]
2 None
dtype: list
The `n` parameter can be used to limit the number of
splits on the delimiter.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/index.html]
2 None
dtype: list
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat="/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, index.html]
2 None
dtype: list
When using ``expand=True``, the split elements will expand out
into separate columns. If ``<NA>`` value is present, it is propagated
throughout the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3 4
0 this is a regular sentence
1 https://docs.python.org/index.html <NA> <NA> <NA> <NA>
2 <NA> <NA> <NA> <NA> <NA>
"""
if expand not in (True, False):
raise ValueError(
f"expand parameter accepts only : [True, False], "
f"got {expand}"
)
# Pandas treats 0 as all
if n is None or n == 0:
n = -1
if pat is None:
pat = ""
if regex and isinstance(pat, re.Pattern):
pat = pat.pattern
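# A single-character (or empty) pattern is always split on literally,
# even when regex=True was requested.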
if len(str(pat)) <= 1:
regex = False
if expand:
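# If every row is null there is nothing to split; keep a single
# all-null column so the expanded result still has one column.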
if self._column.null_count == len(self._column):
result_table = {0: self._column.copy()}
else:
if regex is True:
data, _ = libstrings.split_re(self._column, pat, n)
else:
data, _ = libstrings.split(
self._column, cudf.Scalar(pat, "str"), n
)
if len(data) == 1 and data[0].null_count == len(self._column):
result_table = {}
else:
result_table = data
else:
if regex is True:
result_table = libstrings.split_record_re(self._column, pat, n)
else:
result_table = libstrings.split_record(
self._column, cudf.Scalar(pat, "str"), n
)
return self._return_or_inplace(result_table, expand=expand)
def rsplit(
self,
pat: Optional[str] = None,
n: int = -1,
expand: bool = False,
regex: Optional[bool] = None,
) -> SeriesOrIndex:
"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the end, at the
specified delimiter string. Similar to `str.rsplit()
<https://docs.python.org/3/library/stdtypes.html#str.rsplit>`_.
Parameters
----------
pat : str, default None
String or regular expression to split on. If not specified, split
on whitespace.
n : int, default -1 (all)
Limit number of splits in output. `None`, 0, and -1 will all be
interpreted as "all splits".
expand : bool, default False
Expand the split strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding
dimensionality.
* If ``False``, return Series/Index, containing lists
of strings.
regex : bool, default None
Determines if the passed-in pattern is a regular expression:
* If ``True``, assumes the passed-in pattern is a regular
expression
* If ``False``, treats the pattern as a literal string.
* If pat length is 1, treats pat as a literal string.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
split
Split strings around given separator/delimiter.
str.split
Standard library version for split.
str.rsplit
Standard library version for rsplit.
Notes
-----
The handling of the n keyword depends on the number of
found splits:
- If found splits > n, make first n splits only
- If found splits <= n, make all splits
- If for a certain row the number of found splits < n,
append None for padding up to n if ``expand=True``.
If using ``expand=True``, Series and Index callers return
DataFrame and MultiIndex objects, respectively.
Examples
--------
>>> import cudf
>>> s = cudf.Series(
... [
... "this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... None
... ]
... )
>>> s
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html
2 <NA>
dtype: object
In the default setting, the string is split by whitespace.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: list
Without the ``n`` parameter, the outputs of ``rsplit``
and ``split`` are identical.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: list
The n parameter can be used to limit the number of
splits on the delimiter. The outputs of split and rsplit are different.
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: list
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 None
dtype: list
When using ``expand=True``, the split elements will expand
out into separate columns. If ``<NA>`` value is present,
it is propagated throughout the columns during the split.
>>> s.str.rsplit(n=2, expand=True)
0 1 2
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html <NA> <NA>
2 <NA> <NA> <NA>
For slightly more complex use cases like splitting the
html document name from a url, a combination of parameter
settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence <NA>
1 https://docs.python.org/3/tutorial index.html
2 <NA> <NA>
"""
if expand not in (True, False):
raise ValueError(
f"expand parameter accepts only : [True, False], "
f"got {expand}"
)
# Pandas treats 0 as all
if n == 0:
n = -1
if pat is None:
pat = ""
if regex and isinstance(pat, re.Pattern):
pat = pat.pattern
if expand:
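# Same all-null shortcut as split(): preserve a single null column
# instead of attempting to split.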
if self._column.null_count == len(self._column):
result_table = {0: self._column.copy()}
else:
if regex is True:
data, _ = libstrings.rsplit_re(self._column, pat, n)
else:
data, _ = libstrings.rsplit(
self._column, cudf.Scalar(pat, "str"), n
)
if len(data) == 1 and data[0].null_count == len(self._column):
result_table = {}
else:
result_table = data
else:
if regex is True:
result_table = libstrings.rsplit_record_re(
self._column, pat, n
)
else:
result_table = libstrings.rsplit_record(
self._column, cudf.Scalar(pat, "str"), n
)
return self._return_or_inplace(result_table, expand=expand)
def partition(self, sep: str = " ", expand: bool = True) -> SeriesOrIndex:
"""
Split the string at the first occurrence of sep.
This method splits the string at the first occurrence
of sep, and returns 3 elements containing the part
before the separator, the separator itself, and the
part after the separator. If the separator is not found,
return 3 elements containing the string itself, followed
by two empty strings.
Parameters
----------
sep : str, default ' ' (whitespace)
String to split on.
Returns
-------
DataFrame or MultiIndex
Returns a DataFrame / MultiIndex
Notes
-----
The parameter `expand` is not yet supported and will raise a
`NotImplementedError` if anything other than the default value is set.
See Also
--------
rpartition
Split the string at the last occurrence of sep.
split
Split strings around given separators.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
        0  1             2
0   Linda     van der Berg
1  George      Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
                    0  1       2
0  Linda van der Berg
1         George Pitt  -  Rivers
Also available on indices:
>>> idx = cudf.Index(['X 123', 'Y 999'])
>>> idx
StringIndex(['X 123' 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
)
"""
if expand is not True:
raise NotImplementedError(
"`expand=False` is currently not supported"
)
if sep is None:
sep = " "
return self._return_or_inplace(
libstrings.partition(self._column, cudf.Scalar(sep, "str"))[0],
expand=expand,
)
def rpartition(self, sep: str = " ", expand: bool = True) -> SeriesOrIndex:
"""
Split the string at the last occurrence of sep.
This method splits the string at the last occurrence
of sep, and returns 3 elements containing the part
before the separator, the separator itself, and the
part after the separator. If the separator is not
found, return 3 elements containing two empty strings,
followed by the string itself.
Parameters
----------
sep : str, default ' ' (whitespace)
String to split on.
Returns
-------
DataFrame or MultiIndex
Returns a DataFrame / MultiIndex
Notes
-----
The parameter `expand` is not yet supported and will raise a
`NotImplementedError` if anything other than the default value is set.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.rpartition()
               0  1            2
0  Linda van der          Berg
1         George   Pitt-Rivers
Also available on indices:
>>> idx = cudf.Index(['X 123', 'Y 999'])
>>> idx
StringIndex(['X 123' 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.rpartition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
)
"""
if expand is not True:
raise NotImplementedError(
"`expand=False` is currently not supported"
)
if sep is None:
sep = " "
return self._return_or_inplace(
libstrings.rpartition(self._column, cudf.Scalar(sep, "str"))[0],
expand=expand,
)
def pad(
self, width: int, side: str = "left", fillchar: str = " "
) -> SeriesOrIndex:
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled with
character defined in fillchar.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of object
Returns Series or Index with minimum number
of char in object.
See Also
--------
rjust
Fills the left side of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='left')``.
ljust
Fills the right side of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='right')``.
center
Fills both sides of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='both')``.
zfill
Pad strings in the Series/Index by prepending '0' character.
Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["caribou", "tiger"])
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
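# Translate the user-facing side name into the libcudf SideType enum.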
try:
side = libstrings.SideType[side.upper()]
except KeyError:
raise ValueError(
"side has to be either one of {'left', 'right', 'both'}"
)
return self._return_or_inplace(
libstrings.pad(self._column, width, fillchar, side)
)
def zfill(self, width: int) -> SeriesOrIndex:
"""
Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters
on the left of the string to reach a total string length
width. Strings in the Series/Index with length greater
than or equal to width are unchanged.
The sign character is preserved if it appears in the first
position of the string.
Parameters
----------
width : int
Minimum length of resulting string;
strings with length less than ``width``
will be prepended with '0' characters.
Returns
-------
Series/Index of str dtype
Returns Series or Index with prepended '0' characters.
See Also
--------
rjust
Fills the left side of strings with an arbitrary character.
ljust
Fills the right side of strings with an arbitrary character.
pad
Fills the specified sides of strings with an arbitrary character.
center
Fills both sides of strings with an arbitrary character.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['-1', '1', '1000', None])
>>> s
0 -1
1 1
2 1000
3 <NA>
dtype: object
Note that ``None`` is not a string, therefore it is kept
as ``<NA>``. ``1000`` remains unchanged as
it is longer than ``width``.
>>> s.str.zfill(3)
0 -01
1 001
2 1000
3 <NA>
dtype: object
"""
if not is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(libstrings.zfill(self._column, width))
def center(self, width: int, fillchar: str = " ") -> SeriesOrIndex:
"""
Filling left and right side of strings in the Series/Index with an
additional character.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with fillchar.
fillchar : str, default is ' ' (whitespace)
Additional character for filling.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'b', None, 'd'])
>>> s.str.center(1)
0 a
1 b
2 <NA>
3 d
dtype: object
>>> s.str.center(1, fillchar='-')
0 a
1 b
2 <NA>
3 d
dtype: object
>>> s.str.center(2, fillchar='-')
0 a-
1 b-
2 <NA>
3 d-
dtype: object
>>> s.str.center(5, fillchar='-')
0 --a--
1 --b--
2 <NA>
3 --d--
dtype: object
>>> s.str.center(6, fillchar='-')
0 --a---
1 --b---
2 <NA>
3 --d---
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
libstrings.center(self._column, width, fillchar)
)
def ljust(self, width: int, fillchar: str = " ") -> SeriesOrIndex:
"""
Filling right side of strings in the Series/Index with an additional
character. Equivalent to `str.ljust()
<https://docs.python.org/3/library/stdtypes.html#str.ljust>`_.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with ``fillchar``.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids ai"])
>>> s.str.ljust(10, fillchar="_")
0 hello world
1 rapids ai_
dtype: object
>>> s = cudf.Series(["a", "", "ab", "__"])
>>> s.str.ljust(1, fillchar="-")
0 a
1 -
2 ab
3 __
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
libstrings.ljust(self._column, width, fillchar)
)
def rjust(self, width: int, fillchar: str = " ") -> SeriesOrIndex:
"""
Filling left side of strings in the Series/Index with an additional
character. Equivalent to `str.rjust()
<https://docs.python.org/3/library/stdtypes.html#str.rjust>`_.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with fillchar.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids ai"])
>>> s.str.rjust(20, fillchar="_")
0 _________hello world
1 ___________rapids ai
dtype: object
>>> s = cudf.Series(["a", "", "ab", "__"])
>>> s.str.rjust(1, fillchar="-")
0 a
1 -
2 ab
3 __
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
libstrings.rjust(self._column, width, fillchar)
)
def strip(self, to_strip: Optional[str] = None) -> SeriesOrIndex:
r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of
specified characters from each string in the Series/Index
from left and right sides. Equivalent to `str.strip()
<https://docs.python.org/3/library/stdtypes.html#str.strip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters
will be stripped. If None then whitespaces are removed.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
See Also
--------
lstrip
Remove leading characters in Series/Index.
rstrip
Remove trailing characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', None])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 <NA>
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 <NA>
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 <NA>
dtype: object
"""
if to_strip is None:
to_strip = ""
return self._return_or_inplace(
libstrings.strip(self._column, cudf.Scalar(to_strip, "str"))
)
def lstrip(self, to_strip: Optional[str] = None) -> SeriesOrIndex:
r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines)
or a set of specified characters from
each string in the Series/Index from left side.
Equivalent to `str.lstrip()
<https://docs.python.org/3/library/stdtypes.html#str.lstrip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will
be stripped. If None then whitespaces are removed.
Returns
-------
Series or Index of object
See Also
--------
strip
Remove leading and trailing characters in Series/Index.
rstrip
Remove trailing characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', None])
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 <NA>
dtype: object
"""
if to_strip is None:
to_strip = ""
return self._return_or_inplace(
libstrings.lstrip(self._column, cudf.Scalar(to_strip, "str"))
)
def rstrip(self, to_strip: Optional[str] = None) -> SeriesOrIndex:
r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines)
or a set of specified characters from each
string in the Series/Index from right side.
Equivalent to `str.rstrip()
<https://docs.python.org/3/library/stdtypes.html#str.rstrip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to
be removed. All combinations of this
set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
See Also
--------
strip
Remove leading and trailing characters in Series/Index.
lstrip
Remove leading characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', None])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 <NA>
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 <NA>
dtype: object
"""
if to_strip is None:
to_strip = ""
return self._return_or_inplace(
libstrings.rstrip(self._column, cudf.Scalar(to_strip, "str"))
)
def wrap(self, width: int, **kwargs) -> SeriesOrIndex:
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
Parameters
----------
width : int
Maximum line width.
Returns
-------
Series or Index
Notes
-----
The parameters `expand_tabs`, `replace_whitespace`,
`drop_whitespace`, `break_long_words`, and `break_on_hyphens`
are not yet supported and will raise a
NotImplementedError if they are set to any value.
This method currently achieves behavior matching R's
stringr library ``str_wrap`` function; the equivalent
pandas implementation can be obtained using the
following parameter settings:
expand_tabs = False
replace_whitespace = True
drop_whitespace = True
break_long_words = False
break_on_hyphens = False
Examples
--------
>>> import cudf
>>> data = ['line to be wrapped', 'another line to be wrapped']
>>> s = cudf.Series(data)
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
"""
if not is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
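# cudf's wrap defaults differ from textwrap/pandas: warn when callers
# rely on a default that diverges, and reject options not implemented.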
expand_tabs = kwargs.get("expand_tabs", None)
if expand_tabs is True:
raise NotImplementedError("`expand_tabs=True` is not supported")
elif expand_tabs is None:
warnings.warn(
"wrap current implementation defaults to `expand_tabs`=False"
)
replace_whitespace = kwargs.get("replace_whitespace", True)
if not replace_whitespace:
raise NotImplementedError(
"`replace_whitespace=False` is not supported"
)
drop_whitespace = kwargs.get("drop_whitespace", True)
if not drop_whitespace:
raise NotImplementedError(
"`drop_whitespace=False` is not supported"
)
break_long_words = kwargs.get("break_long_words", None)
if break_long_words is True:
raise NotImplementedError(
"`break_long_words=True` is not supported"
)
elif break_long_words is None:
warnings.warn(
"wrap current implementation defaults to "
"`break_long_words`=False"
)
break_on_hyphens = kwargs.get("break_on_hyphens", None)
if break_on_hyphens is True:
raise NotImplementedError(
"`break_on_hyphens=True` is not supported"
)
elif break_on_hyphens is None:
warnings.warn(
"wrap current implementation defaults to "
"`break_on_hyphens`=False"
)
return self._return_or_inplace(libstrings.wrap(self._column, width))
def count(self, pat: str, flags: int = 0) -> SeriesOrIndex:
r"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular
regex pattern is repeated in each of the string elements of the Series.
Parameters
----------
pat : str or compiled regex
Valid regular expression.
flags : int, default 0 (no flags)
Flags to pass through to the regex engine (e.g. re.MULTILINE)
Returns
-------
Series or Index
Notes
-----
- `flags` parameter currently only supports re.DOTALL
and re.MULTILINE.
- Some characters need to be escaped when passing
in pat. e.g. ``'$'`` has a special meaning in regex
and must be escaped when finding this literal character.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A', 'B', 'Aaba', 'Baca', None, 'CABA', 'cat'])
>>> s.str.count('a')
0 0
1 0
2 2
3 2
4 <NA>
5 0
6 1
dtype: int32
Escape ``'$'`` to find the literal dollar sign.
>>> s = cudf.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\$')
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int32
This is also available on Index.
>>> index = cudf.Index(['A', 'A', 'Aaba', 'cat'])
>>> index.str.count('a')
Int64Index([0, 0, 2, 1], dtype='int64')
""" # noqa W605
if isinstance(pat, re.Pattern):
flags = pat.flags & ~re.U
pat = pat.pattern
if not _is_supported_regex_flags(flags):
raise NotImplementedError(
"unsupported value for `flags` parameter"
)
return self._return_or_inplace(
libstrings.count_re(self._column, pat, flags)
)
def findall(self, pat: str, flags: int = 0) -> SeriesOrIndex:
"""
Find all occurrences of pattern or regular expression in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
flags : int, default 0 (no flags)
Flags to pass through to the regex engine (e.g. re.MULTILINE)
Returns
-------
DataFrame
All non-overlapping matches of pattern or
regular expression in each string of this Series/Index.
Notes
-----
The `flags` parameter currently only supports re.DOTALL and
re.MULTILINE.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern 'Monkey' returns one match:
>>> s.str.findall('Monkey')
0 []
1 [Monkey]
2 []
dtype: list
When the pattern matches more than one string
in the Series, all matches are returned:
>>> s.str.findall('on')
0 [on]
1 [on]
2 []
dtype: list
Regular expressions are supported too. For instance,
the search for all the strings ending with
the word 'on' is shown next:
>>> s.str.findall('on$')
0 [on]
1 []
2 []
dtype: list
If the pattern is found more than once in the same
string, then multiple strings are returned:
>>> s.str.findall('b')
0 []
1 []
2 [b, b]
dtype: list
"""
if isinstance(pat, re.Pattern):
flags = pat.flags & ~re.U
pat = pat.pattern
if not _is_supported_regex_flags(flags):
raise NotImplementedError(
"unsupported value for `flags` parameter"
)
data = libstrings.findall(self._column, pat, flags)
return self._return_or_inplace(data)
def find_multiple(self, patterns: SeriesOrIndex) -> "cudf.Series":
"""
Find all first occurrences of patterns in the Series/Index.
Parameters
----------
patterns : array-like, Sequence or Series
Patterns to search for in the given Series/Index.
Returns
-------
Series
A Series with a list of indices of each pattern's first occurrence.
If a pattern is not found, -1 is returned for that index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["strings", "to", "search", "in"])
>>> s
0 strings
1 to
2 search
3 in
dtype: object
>>> t = cudf.Series(["a", "string", "g", "inn", "o", "r", "sea"])
>>> t
0 a
1 string
2 g
3 inn
4 o
5 r
6 sea
dtype: object
>>> s.str.find_multiple(t)
0 [-1, 0, 5, -1, -1, 2, -1]
1 [-1, -1, -1, -1, 1, -1, -1]
2 [2, -1, -1, -1, -1, 3, 0]
3 [-1, -1, -1, -1, -1, -1, -1]
dtype: list
"""
if can_convert_to_column(patterns):
patterns_column = column.as_column(patterns)
else:
raise TypeError(
"patterns should be an array-like or a Series object, "
f"found {type(patterns)}"
)
if not isinstance(patterns_column, StringColumn):
raise TypeError(
"patterns can only be of 'string' dtype, "
f"got: {patterns_column.dtype}"
)
return cudf.Series(
libstrings.find_multiple(self._column, patterns_column),
index=self._parent.index
if isinstance(self._parent, cudf.Series)
else self._parent,
name=self._parent.name,
)
def isempty(self) -> SeriesOrIndex:
"""
Check whether each string is an empty string.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as
the original Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1", "abc", "", " ", None])
>>> s.str.isempty()
0 False
1 False
2 True
3 False
4 False
dtype: bool
"""
return self._return_or_inplace(
# mypy can't deduce that the return value of
# StringColumn.__eq__ is ColumnBase because the binops are
# dynamically added by a mixin class
cast(ColumnBase, self._column == "").fillna(False)
)
def isspace(self) -> SeriesOrIndex:
r"""
Check whether all characters in each string are whitespace.
This is equivalent to running the Python string method
`str.isspace()
<https://docs.python.org/3/library/stdtypes.html#str.isspace>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as
the original Series/Index.
See Also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series([' ', '\t\r\n ', ''])
>>> s.str.isspace()
0 True
1 True
2 False
dtype: bool
"""
return self._return_or_inplace(libstrings.is_space(self._column))
def endswith(self, pat: str) -> SeriesOrIndex:
"""
Test if the end of each string element matches a pattern.
Parameters
----------
pat : str or list-like
If `pat` is a str, evaluates whether each string of the
series ends with `pat`.
If `pat` is a list-like, evaluates whether `self[i]`
ends with `pat[i]`.
Regular expressions are not accepted.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given
pattern matches the end of each string element.
Notes
-----
`na` parameter is not yet supported, as cudf uses
native strings instead of Python objects.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['bat', 'bear', 'caT', None])
>>> s
0 bat
1 bear
2 caT
3 <NA>
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 <NA>
dtype: bool
"""
if pat is None:
raise TypeError(
f"expected a string or a sequence-like object, not "
f"{type(pat).__name__}"
)
elif is_scalar(pat):
result_col = libstrings.endswith(
self._column, cudf.Scalar(pat, "str")
)
else:
result_col = libstrings.endswith_multiple(
self._column, column.as_column(pat, dtype="str")
)
return self._return_or_inplace(result_col)
def startswith(self, pat: Union[str, Sequence]) -> SeriesOrIndex:
"""
Test if the start of each string element matches a pattern.
Equivalent to `str.startswith()
<https://docs.python.org/3/library/stdtypes.html#str.startswith>`_.
Parameters
----------
pat : str or list-like
If `pat` is a str, evaluates whether each string of the
series starts with `pat`.
If `pat` is a list-like, evaluates whether `self[i]`
starts with `pat[i]`.
Regular expressions are not accepted.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given
pattern matches the start of each string element.
See Also
--------
endswith
Same as startswith, but tests the end of string.
contains
Tests if string element contains a pattern.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['bat', 'Bear', 'cat', None])
>>> s
0 bat
1 Bear
2 cat
3 <NA>
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 <NA>
dtype: bool
"""
if pat is None:
raise TypeError(
f"expected a string or a sequence-like object, not "
f"{type(pat).__name__}"
)
elif is_scalar(pat):
result_col = libstrings.startswith(
self._column, cudf.Scalar(pat, "str")
)
else:
result_col = libstrings.startswith_multiple(
self._column, column.as_column(pat, dtype="str")
)
return self._return_or_inplace(result_col)
def removesuffix(self, suffix: str) -> SeriesOrIndex:
"""
Remove a suffix from an object series.
If the suffix is not present, the original string will be returned.
Parameters
----------
suffix : str
Remove the suffix of the string.
Returns
-------
Series/Index: object
The Series or Index with given suffix removed.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["foo_str", "bar_str", "no_suffix"])
>>> s
0 foo_str
1 bar_str
2 no_suffix
dtype: object
>>> s.str.removesuffix("_str")
0 foo
1 bar
2 no_suffix
dtype: object
"""
if suffix is None or len(suffix) == 0:
return self._return_or_inplace(self._column)
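# Trim the suffix from every row, then keep the trimmed value only
# where the row actually ends with the suffix.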
ends_column = libstrings.endswith(
self._column, cudf.Scalar(suffix, "str")
)
removed_column = libstrings.slice_strings(
self._column, 0, -len(suffix), None
)
result = cudf._lib.copying.copy_if_else(
removed_column, self._column, ends_column
)
return self._return_or_inplace(result)
def removeprefix(self, prefix: str) -> SeriesOrIndex:
"""
Remove a prefix from an object series.
If the prefix is not present, the original string will be returned.
Parameters
----------
prefix : str
Remove the prefix of the string.
Returns
-------
Series/Index: object
The Series or Index with given prefix removed.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["str_foo", "str_bar", "no_prefix"])
>>> s
0 str_foo
1 str_bar
2 no_prefix
dtype: object
>>> s.str.removeprefix("str_")
0 foo
1 bar
2 no_prefix
dtype: object
"""
if prefix is None or len(prefix) == 0:
return self._return_or_inplace(self._column)
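# Trim the prefix from every row, then keep the trimmed value only
# where the row actually starts with the prefix.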
starts_column = libstrings.startswith(
self._column, cudf.Scalar(prefix, "str")
)
removed_column = libstrings.slice_strings(
self._column, len(prefix), None, None
)
result = cudf._lib.copying.copy_if_else(
removed_column, self._column, starts_column
)
return self._return_or_inplace(result)
def find(
self, sub: str, start: int = 0, end: Optional[int] = None
) -> SeriesOrIndex:
"""
Return lowest indexes in each string in the Series/Index
where the substring is fully contained between ``[start:end]``.
Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.find('b')
0 1
1 -1
2 0
3 2
dtype: int32
Parameters such as `start` and `end` can also be used.
>>> s.str.find('b', start=1, end=5)
0 1
1 -1
2 -1
3 2
dtype: int32
"""
if not isinstance(sub, str):
raise TypeError(
f"expected a string object, not {type(sub).__name__}"
)
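# libcudf interprets end=-1 as "search through the end of each string".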
if end is None:
end = -1
result_col = libstrings.find(
self._column, cudf.Scalar(sub, "str"), start, end
)
return self._return_or_inplace(result_col)
def rfind(
self, sub: str, start: int = 0, end: Optional[int] = None
) -> SeriesOrIndex:
"""
Return highest indexes in each string in the Series/Index
where the substring is fully contained between ``[start:end]``.
Return -1 on failure. Equivalent to standard `str.rfind()
<https://docs.python.org/3/library/stdtypes.html#str.rfind>`_.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int
See Also
--------
find
Return lowest indexes in each string.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["abc", "hello world", "rapids ai"])
>>> s.str.rfind('a')
0 0
1 -1
2 7
dtype: int32
Using `start` and `end` parameters.
>>> s.str.rfind('a', start=2, end=5)
0 -1
1 -1
2 -1
dtype: int32
"""
if not isinstance(sub, str):
raise TypeError(
f"expected a string object, not {type(sub).__name__}"
)
if end is None:
end = -1
result_col = libstrings.rfind(
self._column, cudf.Scalar(sub, "str"), start, end
)
return self._return_or_inplace(result_col)
def index(
self, sub: str, start: int = 0, end: Optional[int] = None
) -> SeriesOrIndex:
"""
Return lowest indexes in each string where the substring
is fully contained between ``[start:end]``. This is the same
as str.find except instead of returning -1, it raises a ValueError
when the substring is not found.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.index('b')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: substring not found
Parameters such as `start` and `end` can also be used.
>>> s = cudf.Series(['abc', 'abb','ab' ,'ddb'])
>>> s.str.index('b', start=1, end=5)
0 1
1 1
2 1
3 2
dtype: int32
"""
if not isinstance(sub, str):
raise TypeError(
f"expected a string object, not {type(sub).__name__}"
)
if end is None:
end = -1
result_col = libstrings.find(
self._column, cudf.Scalar(sub, "str"), start, end
)
result = self._return_or_inplace(result_col)
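# Unlike find(), a missing substring is an error: surface it as
# ValueError, mirroring str.index.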
if (result == -1).any():
raise ValueError("substring not found")
else:
return result
def rindex(
self, sub: str, start: int = 0, end: Optional[int] = None
) -> SeriesOrIndex:
"""
Return highest indexes in each string where the substring
is fully contained between ``[start:end]``. This is the same
as ``str.rfind`` except instead of returning -1, it raises a
``ValueError`` when the substring is not found.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.rindex('b')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: substring not found
Parameters such as `start` and `end` can also be used.
>>> s = cudf.Series(['abc', 'abb','ab' ,'ddb'])
>>> s.str.rindex('b', start=1, end=5)
0 1
1 2
2 1
3 2
dtype: int32
"""
if not isinstance(sub, str):
raise TypeError(
f"expected a string object, not {type(sub).__name__}"
)
if end is None:
end = -1
result_col = libstrings.rfind(
self._column, cudf.Scalar(sub, "str"), start, end
)
result = self._return_or_inplace(result_col)
if (result == -1).any():
raise ValueError("substring not found")
else:
return result
def match(
self, pat: str, case: bool = True, flags: int = 0
) -> SeriesOrIndex:
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str or compiled regex
Character sequence or regular expression.
flags : int, default 0 (no flags)
Flags to pass through to the regex engine (e.g. re.MULTILINE)
Returns
-------
Series or Index of boolean values.
Notes
-----
Parameters `case` and `na` are currently not supported.
The `flags` parameter currently only supports re.DOTALL and
re.MULTILINE.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["rapids", "ai", "cudf"])
Checking for strings starting with `a`.
>>> s.str.match('a')
0 False
1 True
2 False
dtype: bool
Checking for strings starting with any of `a` or `c`.
>>> s.str.match('[ac]')
0 False
1 True
2 True
dtype: bool
"""
if case is not True:
raise NotImplementedError("`case` parameter is not yet supported")
if isinstance(pat, re.Pattern):
flags = pat.flags & ~re.U
pat = pat.pattern
if not _is_supported_regex_flags(flags):
raise NotImplementedError(
"unsupported value for `flags` parameter"
)
return self._return_or_inplace(
libstrings.match_re(self._column, pat, flags)
)
def url_decode(self) -> SeriesOrIndex:
"""
Returns a URL-decoded format of each string.
No format checking is performed. All characters
are expected to be encoded as UTF-8 hex values.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A%2FB-C%2FD', 'e%20f.g', '4-5%2C6'])
>>> s.str.url_decode()
0 A/B-C/D
1 e f.g
2 4-5,6
dtype: object
>>> data = ["https%3A%2F%2Frapids.ai%2Fstart.html",
... "https%3A%2F%2Fmedium.com%2Frapids-ai"]
>>> s = cudf.Series(data)
>>> s.str.url_decode()
0 https://rapids.ai/start.html
1 https://medium.com/rapids-ai
dtype: object
"""
return self._return_or_inplace(libstrings.url_decode(self._column))
def url_encode(self) -> SeriesOrIndex:
"""
Returns a URL-encoded format of each string.
No format checking is performed.
All characters are encoded except for ASCII letters,
digits, and these characters: ``'.','_','-','~'``.
Encoding converts to hex using UTF-8 encoded bytes.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A/B-C/D', 'e f.g', '4-5,6'])
>>> s.str.url_encode()
0 A%2FB-C%2FD
1 e%20f.g
2 4-5%2C6
dtype: object
>>> data = ["https://rapids.ai/start.html",
... "https://medium.com/rapids-ai"]
>>> s = cudf.Series(data)
>>> s.str.url_encode()
0 https%3A%2F%2Frapids.ai%2Fstart.html
1 https%3A%2F%2Fmedium.com%2Frapids-ai
dtype: object
"""
return self._return_or_inplace(libstrings.url_encode(self._column))
def code_points(self) -> SeriesOrIndex:
"""
Returns an array by filling it with the UTF-8 code point
values for each character of each string.
This function uses the ``len()`` method to determine
the size of each sub-array of integers.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["a","xyz", "éee"])
>>> s.str.code_points()
0 97
1 120
2 121
3 122
4 50089
5 101
6 101
dtype: int32
>>> s = cudf.Series(["abc"])
>>> s.str.code_points()
0 97
1 98
2 99
dtype: int32
"""
new_col = libstrings.code_points(self._column)
if isinstance(self._parent, cudf.Series):
return cudf.Series(new_col, name=self._parent.name)
elif isinstance(self._parent, cudf.BaseIndex):
return cudf.core.index.as_index(new_col, name=self._parent.name)
else:
return new_col
def translate(self, table: dict) -> SeriesOrIndex:
"""
Map all characters in the string through the given
mapping table.
Equivalent to standard `str.translate()
<https://docs.python.org/3/library/stdtypes.html#str.translate>`_.
Parameters
----------
table : dict
Table is a mapping of Unicode ordinals to Unicode
ordinals, strings, or None.
Unmapped characters are left untouched.
`str.maketrans()
<https://docs.python.org/3/library/stdtypes.html#str.maketrans>`_
is a helper function for making translation tables.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence','SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.translate({'a': "1"})
0 lower
1 CAPITALS
2 this is 1 sentence
3 SwApC1Se
dtype: object
>>> s.str.translate({'a': "1", "e":"#"})
0 low#r
1 CAPITALS
2 this is 1 s#nt#nc#
3 SwApC1S#
dtype: object
"""
table = str.maketrans(table)
return self._return_or_inplace(
libstrings.translate(self._column, table)
)
def filter_characters(
self, table: dict, keep: bool = True, repl: Optional[str] = None
) -> SeriesOrIndex:
"""
Remove characters from each string using the character ranges
in the given mapping table.
Parameters
----------
table : dict
This table is a range of Unicode ordinals to filter.
The minimum value is the key and the maximum value is the value.
You can use `str.maketrans()
<https://docs.python.org/3/library/stdtypes.html#str.maketrans>`_
as a helper function for making the filter table.
Overlapping ranges will cause undefined results.
Range values are inclusive.
keep : boolean
If False, the character ranges in the ``table`` are removed.
If True, the character ranges not in the ``table`` are removed.
Default is True.
repl : str
Optional replacement string to use in place of removed characters.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> data = ['aeiou', 'AEIOU', '0123456789']
>>> s = cudf.Series(data)
>>> s.str.filter_characters({'a':'l', 'M':'Z', '4':'6'})
0 aei
1 OU
2 456
dtype: object
>>> s.str.filter_characters({'a':'l', 'M':'Z', '4':'6'}, False, "_")
0 ___ou
1 AEI__
2 0123___789
dtype: object
"""
if repl is None:
repl = ""
table = str.maketrans(table)
return self._return_or_inplace(
libstrings.filter_characters(
self._column, table, keep, cudf.Scalar(repl, "str")
),
)
def normalize_spaces(self) -> SeriesOrIndex:
r"""
Remove extra whitespace between tokens and trim whitespace
from the beginning and the end of each string.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["hello \\t world"," test string "])
>>> ser.str.normalize_spaces()
0 hello world
1 test string
dtype: object
"""
return self._return_or_inplace(
libstrings.normalize_spaces(self._column)
)
def normalize_characters(self, do_lower: bool = True) -> SeriesOrIndex:
r"""
Normalizes strings characters for tokenizing.
This uses the normalizer that is built into the
subword_tokenize function which includes:
- adding padding around punctuation (unicode category starts with
"P") as well as certain ASCII symbols like "^" and "$"
- adding padding around the CJK Unicode block characters
- changing whitespace (e.g. ``\t``, ``\n``, ``\r``) to space
- removing control characters (unicode categories "Cc" and "Cf")
If ``do_lower=True``, lower-casing also removes the accents.
The accents cannot be removed from upper-case characters without
lower-casing and lower-casing cannot be performed without also
removing accents. However, if the accented character is already
lower-case, then only the accent is removed.
Parameters
----------
do_lower : bool, Default is True
If set to True, characters will be lower-cased and accents
will be removed. If False, accented and upper-case characters
are not transformed.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["héllo, \tworld","ĂĆCĖÑTED","$99"])
>>> ser.str.normalize_characters()
0 hello , world
1 accented
2 $ 99
dtype: object
>>> ser.str.normalize_characters(do_lower=False)
0 héllo , world
1 ĂĆCĖÑTED
2 $ 99
dtype: object
"""
return self._return_or_inplace(
libstrings.normalize_characters(self._column, do_lower)
)
def tokenize(self, delimiter: str = " ") -> SeriesOrIndex:
"""
Each string is split into tokens using the provided delimiter(s).
The sequence returned contains the tokens in the order
they were found.
Parameters
----------
delimiter : str or list of strs, Default is whitespace.
The string used to locate the split points of each string.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> data = ["hello world", "goodbye world", "hello goodbye"]
>>> ser = cudf.Series(data)
>>> ser.str.tokenize()
0 hello
0 world
1 goodbye
1 world
2 hello
2 goodbye
dtype: object
"""
delimiter = _massage_string_arg(delimiter, "delimiter", allow_col=True)
if isinstance(delimiter, Column):
result = self._return_or_inplace(
libstrings._tokenize_column(self._column, delimiter),
retain_index=False,
)
elif isinstance(delimiter, cudf.Scalar):
result = self._return_or_inplace(
libstrings._tokenize_scalar(self._column, delimiter),
retain_index=False,
)
else:
raise TypeError(
f"Expected a Scalar or Column\
for delimiters, but got {type(delimiter)}"
)
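# Tokenizing flattens the rows into one token per output row; repeat
# the original index so each token maps back to its source string.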
if isinstance(self._parent, cudf.Series):
result.index = self._parent.index.repeat( # type: ignore
self.token_count(delimiter=delimiter)
)
return result
def detokenize(
self, indices: "cudf.Series", separator: str = " "
) -> SeriesOrIndex:
"""
Combines tokens into strings by concatenating them in the order
in which they appear in the ``indices`` column. The ``separator`` is
concatenated between each token.
Parameters
----------
indices : Series
Each value identifies the output row for the corresponding token.
separator : str
The string concatenated between each token in an output row.
Default is space.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> strs = cudf.Series(["hello", "world", "one", "two", "three"])
>>> indices = cudf.Series([0, 0, 1, 1, 2])
>>> strs.str.detokenize(indices)
0 hello world
1 one two
2 three
dtype: object
"""
separator = _massage_string_arg(separator, "separator")
return self._return_or_inplace(
libstrings.detokenize(self._column, indices._column, separator),
retain_index=False,
)
def character_tokenize(self) -> SeriesOrIndex:
"""
Each string is split into individual characters.
The sequence returned contains each character as an individual string.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> data = ["hello world", None, "goodbye, thank you."]
>>> ser = cudf.Series(data)
>>> ser.str.character_tokenize()
0 h
0 e
0 l
0 l
0 o
0
0 w
0 o
0 r
0 l
0 d
2 g
2 o
2 o
2 d
2 b
2 y
2 e
2 ,
2
2 t
2 h
2 a
2 n
2 k
2
2 y
2 o
2 u
2 .
dtype: object
"""
result_col = libstrings.character_tokenize(self._column)
if isinstance(self._parent, cudf.Series):
lengths = self.len().fillna(0)
index = self._parent.index.repeat(lengths)
return cudf.Series(result_col, name=self._parent.name, index=index)
elif isinstance(self._parent, cudf.BaseIndex):
return cudf.core.index.as_index(result_col, name=self._parent.name)
else:
return result_col
def token_count(self, delimiter: str = " ") -> SeriesOrIndex:
"""
Each string is split into tokens using the provided delimiter.
The returned integer sequence is the number of tokens in each string.
Parameters
----------
delimiter : str or list of strs, Default is whitespace.
The characters or strings used to locate the
split points of each string.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["hello world","goodbye",""])
>>> ser.str.token_count()
0 2
1 1
2 0
dtype: int32
"""
delimiter = _massage_string_arg(delimiter, "delimiter", allow_col=True)
if isinstance(delimiter, Column):
return self._return_or_inplace(
libstrings._count_tokens_column(self._column, delimiter)
)
elif isinstance(delimiter, cudf.Scalar):
return self._return_or_inplace(
libstrings._count_tokens_scalar(self._column, delimiter)
)
else:
raise TypeError(
f"Expected a Scalar or Column\
for delimiters, but got {type(delimiter)}"
)
def ngrams(self, n: int = 2, separator: str = "_") -> SeriesOrIndex:
"""
Generate the n-grams from a set of tokens; each record
in the series is treated as a token.
You can generate tokens from a Series instance using
the ``Series.str.tokenize()`` function.
Parameters
----------
n : int
The degree of the n-gram (number of consecutive tokens).
Default of 2 for bigrams.
separator : str
The separator to use between tokens within an n-gram.
Default is '_'.
Examples
--------
>>> import cudf
>>> str_series = cudf.Series(['this is my', 'favorite book'])
>>> str_series.str.ngrams(2, "_")
0 this is my_favorite book
dtype: object
>>> str_series = cudf.Series(['abc','def','xyz','hhh'])
>>> str_series.str.ngrams(2, "_")
0 abc_def
1 def_xyz
2 xyz_hhh
dtype: object
"""
separator = _massage_string_arg(separator, "separator")
return self._return_or_inplace(
libstrings.generate_ngrams(self._column, n, separator),
retain_index=False,
)
def character_ngrams(
self, n: int = 2, as_list: bool = False
) -> SeriesOrIndex:
"""
Generate the n-grams from characters in a column of strings.
Parameters
----------
n : int
The degree of the n-gram (number of consecutive characters).
Default of 2 for bigrams.
as_list : bool
Set to True to return ngrams in a list column where each
list element is the ngrams for each string.
Examples
--------
>>> import cudf
>>> str_series = cudf.Series(['abcd','efgh','xyz'])
>>> str_series.str.character_ngrams(2)
0 ab
0 bc
0 cd
1 ef
1 fg
1 gh
2 xy
2 yz
dtype: object
>>> str_series.str.character_ngrams(3)
0 abc
0 bcd
1 efg
1 fgh
2 xyz
dtype: object
>>> str_series.str.character_ngrams(3,True)
0 [abc, bcd]
1 [efg, fgh]
2 [xyz]
dtype: list
"""
ngrams = libstrings.generate_character_ngrams(self._column, n)
# convert the output to a list by just generating the
# offsets for the output list column
sn = (self.len() - (n - 1)).clip(0, None).fillna(0) # type: ignore
sizes = libcudf.concat.concat_columns(
[column.as_column(0, dtype=np.int32, length=1), sn._column]
)
oc = libcudf.reduce.scan("cumsum", sizes, True)
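# Wrap the flat ngram strings in a list column, using the cumulative
# sizes as offsets so each output row holds the ngrams of the
# corresponding input string.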
lc = cudf.core.column.ListColumn(
size=self._column.size,
dtype=cudf.ListDtype(self._column.dtype),
mask=self._column.mask,
offset=0,
null_count=self._column.null_count,
children=(oc, ngrams),
)
result = self._return_or_inplace(lc, retain_index=True)
if isinstance(result, cudf.Series) and not as_list:
return result.explode()
return result
def hash_character_ngrams(
self, n: int = 5, as_list: bool = False
) -> SeriesOrIndex:
"""
Generate hashes of n-grams from characters in a column of strings.
The MurmurHash32 algorithm is used to produce the hash results.
Parameters
----------
n : int
The degree of the n-gram (number of consecutive characters).
Default is 5.
as_list : bool
Set to True to return the hashes in a list column where each
list element is the hashes for each string.
Examples
--------
>>> import cudf
>>> str_series = cudf.Series(['abcdefg','stuvwxyz'])
>>> str_series.str.hash_character_ngrams(5, True)
0 [3902511862, 570445242, 4202475763]
1 [556054766, 3166857694, 3760633458, 192452857]
dtype: list
>>> str_series.str.hash_character_ngrams(5)
0 3902511862
0 570445242
0 4202475763
1 556054766
1 3166857694
1 3760633458
1 192452857
dtype: uint32
"""
result = self._return_or_inplace(
libstrings.hash_character_ngrams(self._column, n),
retain_index=True,
)
if isinstance(result, cudf.Series) and not as_list:
return result.explode()
return result
def ngrams_tokenize(
self, n: int = 2, delimiter: str = " ", separator: str = "_"
) -> SeriesOrIndex:
"""
Generate the n-grams using tokens from each string.
This will tokenize each string and then generate ngrams for each
string.
Parameters
----------
n : int, Default 2.
The degree of the n-gram (number of consecutive tokens).
delimiter : str, Default is white-space.
The character used to locate the split points of each string.
separator : str, Default is '_'.
The separator to use between tokens within an n-gram.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(['this is the', 'best book'])
>>> ser.str.ngrams_tokenize(n=2, separator='_')
0 this_is
1 is_the
2 best_book
dtype: object
"""
delimiter = _massage_string_arg(delimiter, "delimiter")
separator = _massage_string_arg(separator, "separator")
return self._return_or_inplace(
libstrings.ngrams_tokenize(self._column, n, delimiter, separator),
retain_index=False,
)
def replace_tokens(
self, targets, replacements, delimiter: Optional[str] = None
) -> SeriesOrIndex:
"""
The targets tokens are searched for within each string in the series
and replaced with the corresponding replacements if found.
Tokens are identified by the delimiter character provided.
Parameters
----------
targets : array-like, Sequence or Series
The tokens to search for inside each string.
replacements : array-like, Sequence, Series or str
The strings to replace for each found target token found.
Alternately, this can be a single str instance and would be
used as replacement for each string found.
delimiter : str
The character used to locate the tokens of each string.
Default is whitespace.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> sr = cudf.Series(["this is me", "theme music", ""])
>>> targets = cudf.Series(["is", "me"])
>>> sr.str.replace_tokens(targets=targets, replacements="_")
0 this _ _
1 theme music
2
dtype: object
>>> sr = cudf.Series(["this;is;me", "theme;music", ""])
>>> sr.str.replace_tokens(targets=targets, replacements=":")
0 this;is;me
1 theme;music
2
dtype: object
"""
if can_convert_to_column(targets):
targets_column = column.as_column(targets)
else:
raise TypeError(
f"targets should be an array-like or a Series object, "
f"found {type(targets)}"
)
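# A single scalar replacement is broadcast against every target token;
# otherwise targets and replacements must pair up one-to-one.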
if is_scalar(replacements):
replacements_column = column.as_column([replacements])
elif can_convert_to_column(replacements):
replacements_column = column.as_column(replacements)
if len(targets_column) != len(replacements_column):
raise ValueError(
"targets and replacements should be same size"
" sequences unless replacements is a string."
)
else:
raise TypeError(
f"replacements should be an str, array-like or Series object, "
f"found {type(replacements)}"
)
if delimiter is None:
delimiter = ""
elif not is_scalar(delimiter):
raise TypeError(
f"Type of delimiter should be a string,"
f" found {type(delimiter)}"
)
return self._return_or_inplace(
libstrings.replace_tokens(
self._column,
targets_column,
replacements_column,
cudf.Scalar(delimiter, dtype="str"),
),
)
def filter_tokens(
self,
min_token_length: int,
replacement: Optional[str] = None,
delimiter: Optional[str] = None,
) -> SeriesOrIndex:
"""
Remove tokens from within each string in the series that are
smaller than min_token_length and optionally replace them
with the replacement string.
Tokens are identified by the delimiter character provided.
Parameters
----------
min_token_length: int
Minimum number of characters for a token to be retained
in the output string.
replacement : str
String used in place of removed tokens.
delimiter : str
The character(s) used to locate the tokens of each string.
Default is whitespace.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> sr = cudf.Series(["this is me", "theme music", ""])
>>> sr.str.filter_tokens(3, replacement="_")
0 this _ _
1 theme music
2
dtype: object
>>> sr = cudf.Series(["this;is;me", "theme;music", ""])
>>> sr.str.filter_tokens(5,None,";")
0 ;;
1 theme;music
2
dtype: object
"""
if replacement is None:
replacement = ""
elif not is_scalar(replacement):
raise TypeError(
f"Type of replacement should be a string,"
f" found {type(replacement)}"
)
if delimiter is None:
delimiter = ""
elif not is_scalar(delimiter):
raise TypeError(
f"Type of delimiter should be a string,"
f" found {type(delimiter)}"
)
return self._return_or_inplace(
libstrings.filter_tokens(
self._column,
min_token_length,
cudf.Scalar(replacement, dtype="str"),
cudf.Scalar(delimiter, dtype="str"),
),
)
def porter_stemmer_measure(self) -> SeriesOrIndex:
"""
Compute the Porter Stemmer measure for each string.
The Porter Stemmer algorithm is described `here
<https://tartarus.org/martin/PorterStemmer/def.txt>`_.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["hello", "super"])
>>> ser.str.porter_stemmer_measure()
0 1
1 2
dtype: int32
"""
return self._return_or_inplace(
libstrings.porter_stemmer_measure(self._column)
)
def is_consonant(self, position) -> SeriesOrIndex:
"""
Return true for strings where the character at ``position`` is a
consonant. The ``position`` parameter may also be a list of integers
to check different characters per string.
If the ``position`` is larger than the string length, False is
returned for that string.
Parameters
----------
position: int or list-like
The character position to check within each string.
Returns
-------
Series or Index of bool dtype.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["toy", "trouble"])
>>> ser.str.is_consonant(1)
0 False
1 True
dtype: bool
>>> positions = cudf.Series([2, 3])
>>> ser.str.is_consonant(positions)
0 True
1 False
dtype: bool
"""
ltype = libstrings.LetterType.CONSONANT
if can_convert_to_column(position):
return self._return_or_inplace(
libstrings.is_letter_multi(
self._column, ltype, column.as_column(position)
),
)
return self._return_or_inplace(
libstrings.is_letter(self._column, ltype, position)
)
def is_vowel(self, position) -> SeriesOrIndex:
"""
Return true for strings where the character at ``position`` is a
vowel -- not a consonant. The ``position`` parameter may also be
a list of integers to check different characters per string.
If the ``position`` is larger than the string length, False is
returned for that string.
Parameters
----------
position: int or list-like
The character position to check within each string.
Returns
-------
Series or Index of bool dtype.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["toy", "trouble"])
>>> ser.str.is_vowel(1)
0 True
1 False
dtype: bool
>>> positions = cudf.Series([2, 3])
>>> ser.str.is_vowel(positions)
0 False
1 True
dtype: bool
"""
ltype = libstrings.LetterType.VOWEL
if can_convert_to_column(position):
return self._return_or_inplace(
libstrings.is_letter_multi(
self._column, ltype, column.as_column(position)
),
)
return self._return_or_inplace(
libstrings.is_letter(self._column, ltype, position)
)
def edit_distance(self, targets) -> SeriesOrIndex:
"""
The ``targets`` strings are measured against the strings in this
instance using the Levenshtein edit distance algorithm.
https://www.cuelogic.com/blog/the-levenshtein-algorithm
The ``targets`` parameter may also be a single string in which
case the edit distance is computed for all the strings against
that single string.
Parameters
----------
targets : array-like, Sequence or Series or str
The string(s) to measure against each string.
Returns
-------
Series or Index of int32.
Examples
--------
>>> import cudf
>>> sr = cudf.Series(["puppy", "doggy", "kitty"])
>>> targets = cudf.Series(["pup", "dogie", "kitten"])
>>> sr.str.edit_distance(targets=targets)
0 2
1 2
2 2
dtype: int32
>>> sr.str.edit_distance("puppy")
0 0
1 4
2 4
dtype: int32
"""
if is_scalar(targets):
targets_column = column.as_column([targets])
elif can_convert_to_column(targets):
targets_column = column.as_column(targets)
else:
raise TypeError(
f"targets should be an str, array-like or Series object, "
f"found {type(targets)}"
)
return self._return_or_inplace(
libstrings.edit_distance(self._column, targets_column)
)
def edit_distance_matrix(self) -> SeriesOrIndex:
"""Computes the edit distance between strings in the series.
        The series on which the matrix is computed must have at least 2
        strings and must not contain nulls.
Edit distance is measured based on the `Levenshtein edit distance
algorithm <https://www.cuelogic.com/blog/the-levenshtein-algorithm>`_.
Returns
-------
Series of ListDtype(int64)
Assume ``N`` is the length of this series. The return series
contains ``N`` lists of size ``N``, where the ``j`` th number in
the ``i`` th row of the series tells the edit distance between the
``i`` th string and the ``j`` th string of this series. The matrix
is symmetric. Diagonal elements are 0.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'bc', 'cba'])
>>> s.str.edit_distance_matrix()
0 [0, 1, 2]
1 [1, 0, 2]
2 [2, 2, 0]
dtype: list
"""
if self._column.size < 2:
raise ValueError(
"Require size >= 2 to compute edit distance matrix."
)
if self._column.has_nulls():
raise ValueError(
"Cannot compute edit distance between null strings. "
"Consider removing them using `dropna` or fill with `fillna`."
)
return self._return_or_inplace(
libstrings.edit_distance_matrix(self._column)
)
def minhash(
self, seeds: Optional[ColumnLike] = None, width: int = 4
) -> SeriesOrIndex:
"""
Compute the minhash of a strings column.
This uses the MurmurHash3_x86_32 algorithm for the hash function.
Parameters
----------
seeds : ColumnLike
The seeds used for the hash algorithm.
Must be of type uint32.
width : int
The width of the substring to hash.
Default is 4 characters.
Examples
--------
        >>> import cudf
        >>> import numpy as np
>>> str_series = cudf.Series(['this is my', 'favorite book'])
>>> seeds = cudf.Series([0], dtype=np.uint32)
>>> str_series.str.minhash(seeds)
0 [21141582]
1 [962346254]
dtype: list
>>> seeds = cudf.Series([0, 1, 2], dtype=np.uint32)
>>> str_series.str.minhash(seeds)
0 [21141582, 403093213, 1258052021]
1 [962346254, 677440381, 122618762]
dtype: list
"""
if seeds is None:
seeds_column = column.as_column(0, dtype=np.uint32, length=1)
else:
seeds_column = column.as_column(seeds)
if seeds_column.dtype != np.uint32:
raise ValueError(
f"Expecting a Series with dtype uint32, got {type(seeds)}"
)
return self._return_or_inplace(
libstrings.minhash(self._column, seeds_column, width)
)
def minhash64(
self, seeds: Optional[ColumnLike] = None, width: int = 4
) -> SeriesOrIndex:
"""
Compute the minhash of a strings column.
This uses the MurmurHash3_x64_128 algorithm for the hash function.
This function generates 2 uint64 values but only the first
uint64 value is used.
Parameters
----------
seeds : ColumnLike
The seeds used for the hash algorithm.
Must be of type uint64.
width : int
The width of the substring to hash.
Default is 4 characters.
Examples
--------
        >>> import cudf
        >>> import numpy as np
>>> str_series = cudf.Series(['this is my', 'favorite book'])
>>> seeds = cudf.Series([0, 1, 2], dtype=np.uint64)
>>> str_series.str.minhash64(seeds)
0 [3232308021562742685, 4445611509348165860, 586435843695903598]
1 [23008204270530356, 1281229757012344693, 153762819128779913]
dtype: list
"""
if seeds is None:
seeds_column = column.as_column(0, dtype=np.uint64, length=1)
else:
seeds_column = column.as_column(seeds)
if seeds_column.dtype != np.uint64:
raise ValueError(
f"Expecting a Series with dtype uint64, got {type(seeds)}"
)
return self._return_or_inplace(
libstrings.minhash64(self._column, seeds_column, width)
)
def jaccard_index(self, input: cudf.Series, width: int) -> SeriesOrIndex:
"""
Compute the Jaccard index between this column and the given
input strings column.
Parameters
----------
input : Series
The input strings column to compute the Jaccard index against.
Must have the same number of strings as this column.
width : int
The number of characters for the sliding window calculation.
Examples
--------
>>> import cudf
>>> str1 = cudf.Series(["the brown dog", "jumped about"])
>>> str2 = cudf.Series(["the black cat", "jumped around"])
>>> str1.str.jaccard_index(str2, 5)
0 0.058824
1 0.307692
dtype: float32
"""
return self._return_or_inplace(
libstrings.jaccard_index(self._column, input._column, width),
)
def _massage_string_arg(value, name, allow_col=False):
if isinstance(value, cudf.Scalar):
return value
if isinstance(value, str):
return cudf.Scalar(value, dtype="str")
allowed_types = ["Scalar"]
if allow_col:
if isinstance(value, list):
return column.as_column(value, dtype="str")
if isinstance(value, Column) and is_string_dtype(value.dtype):
return value
allowed_types.append("Column")
raise ValueError(
f"Expected {_expected_types_format(allowed_types)} "
f"for {name} but got {type(value)}"
)
def _expected_types_format(types):
if len(types) == 1:
return types[0]
return ", ".join(types[:-1]) + ", or " + types[-1]
class StringColumn(column.ColumnBase):
"""
Implements operations for Columns of String type
Parameters
----------
mask : Buffer
The validity mask
offset : int
Data offset
children : Tuple[Column]
        Two non-null columns containing the string offsets and the character
        data, respectively
"""
_start_offset: Optional[int]
_end_offset: Optional[int]
_VALID_BINARY_OPERATIONS = {
"__eq__",
"__ne__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__add__",
"__radd__",
# These operators aren't actually supported, they only exist to allow
# empty column binops with scalars of arbitrary other dtypes. See
# the _binaryop method for more information.
"__sub__",
"__mul__",
"__mod__",
"__pow__",
"__truediv__",
"__floordiv__",
}
def __init__(
self,
mask: Optional[Buffer] = None,
size: Optional[int] = None, # TODO: make non-optional
offset: int = 0,
null_count: Optional[int] = None,
children: Tuple["column.ColumnBase", ...] = (),
):
dtype = cudf.api.types.dtype("object")
if size is None:
for child in children:
assert child.offset == 0
if len(children) == 0:
size = 0
elif children[0].size == 0:
size = 0
else:
# one less because the last element of offsets is the number of
# bytes in the data buffer
size = children[0].size - 1
size = size - offset
if len(children) == 0 and size != 0:
# all nulls-column:
offsets = column.full(size + 1, 0, dtype=size_type_dtype)
chars = cudf.core.column.as_column([], dtype="int8")
children = (offsets, chars)
super().__init__(
data=None,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
self._start_offset = None
self._end_offset = None
def copy(self, deep: bool = True):
# Since string columns are immutable, both deep
# and shallow copies share the underlying device data and mask.
return super().copy(deep=False)
@property
def start_offset(self) -> int:
if self._start_offset is None:
if (
len(self.base_children) == 2
and self.offset < self.base_children[0].size
):
self._start_offset = int(
self.base_children[0].element_indexing(self.offset)
)
else:
self._start_offset = 0
return self._start_offset
@property
def end_offset(self) -> int:
if self._end_offset is None:
if (
len(self.base_children) == 2
and (self.offset + self.size) < self.base_children[0].size
):
self._end_offset = int(
self.base_children[0].element_indexing(
self.offset + self.size
)
)
else:
self._end_offset = 0
return self._end_offset
@cached_property
def memory_usage(self) -> int:
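        # The offsets child stores size + 1 entries, and character bytes are
        # measured between start_offset and end_offset so a sliced column only
        # counts the bytes belonging to its own slice.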
n = 0
if len(self.base_children) == 2:
child0_size = (self.size + 1) * self.base_children[
0
].dtype.itemsize
child1_size = (
self.end_offset - self.start_offset
) * self.base_children[1].dtype.itemsize
n += child0_size + child1_size
if self.nullable:
n += cudf._lib.null_mask.bitmask_allocation_size_bytes(self.size)
return n
@property
def base_size(self) -> int:
if len(self.base_children) == 0:
return 0
else:
return self.base_children[0].size - 1
def data_array_view(
self, *, mode="write"
) -> cuda.devicearray.DeviceNDArray:
raise ValueError("Cannot get an array view of a StringColumn")
def to_arrow(self) -> pa.Array:
"""Convert to PyArrow Array
Examples
--------
>>> import cudf
>>> col = cudf.core.column.as_column([1, 2, 3, 4])
>>> col.to_arrow()
<pyarrow.lib.Int64Array object at 0x7f886547f830>
[
1,
2,
3,
4
]
"""
if self.null_count == len(self):
return pa.NullArray.from_buffers(
pa.null(), len(self), [pa.py_buffer(b"")]
)
else:
return super().to_arrow()
def sum(
self,
skipna: Optional[bool] = None,
dtype: Optional[Dtype] = None,
min_count: int = 0,
):
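        # String "sum" is a concatenation reduction: e.g. summing ["ab", "cd"]
        # yields "abcd" (illustrative).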
result_col = self._process_for_reduction(
skipna=skipna, min_count=min_count
)
if isinstance(result_col, type(self)):
return libstrings.join(
result_col,
sep=cudf.Scalar(""),
na_rep=cudf.Scalar(None, "str"),
).element_indexing(0)
else:
return result_col
def set_base_data(self, value):
if value is not None:
raise RuntimeError(
"StringColumns do not use data attribute of Column, use "
"`set_base_children` instead"
)
super().set_base_data(value)
def __contains__(self, item: ScalarLike) -> bool:
if is_scalar(item):
return True in libcudf.search.contains(
self, column.as_column([item], dtype=self.dtype)
)
else:
return True in libcudf.search.contains(
self, column.as_column(item, dtype=self.dtype)
)
def as_numerical_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.NumericalColumn":
out_dtype = cudf.api.types.dtype(dtype)
string_col = self
if out_dtype.kind in {"i", "u"}:
if not libstrings.is_integer(string_col).all():
raise ValueError(
"Could not convert strings to integer "
"type due to presence of non-integer values."
)
elif out_dtype.kind == "f":
if not libstrings.is_float(string_col).all():
raise ValueError(
"Could not convert strings to float "
"type due to presence of non-floating values."
)
result_col = _str_to_numeric_typecast_functions[out_dtype](string_col)
return result_col
def _as_datetime_or_timedelta_column(self, dtype, format):
if len(self) == 0:
return cudf.core.column.as_column([], dtype=dtype)
# Check for None strings
if (self == "None").any():
raise ValueError("Could not convert `None` value to datetime")
is_nat = self == "NaT"
if dtype.kind == "M":
without_nat = self.apply_boolean_mask(is_nat.unary_operator("not"))
all_same_length = (
libstrings.count_characters(without_nat).distinct_count(
dropna=True
)
== 1
)
if not all_same_length:
# Unfortunately disables OK cases like:
# ["2020-01-01", "2020-01-01 00:00:00"]
# But currently incorrect for cases like (drops 10):
# ["2020-01-01", "2020-01-01 10:00:00"]
raise NotImplementedError(
"Cannot parse date-like strings with different formats"
)
valid_ts = str_cast.istimestamp(self, format)
valid = valid_ts | is_nat
if not valid.all():
raise ValueError(f"Column contains invalid data for {format=}")
casting_func = (
str_cast.timestamp2int
if dtype.type == np.datetime64
else str_cast.timedelta2int
)
result_col = casting_func(self, dtype, format)
if is_nat.any():
result_col[is_nat] = None
return result_col
def as_datetime_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.DatetimeColumn":
out_dtype = cudf.api.types.dtype(dtype)
        # Infer the format on the host from the first non-NA element,
        # or return an all-null column if all values in the current
        # column are null.
format = kwargs.get("format", None)
if format is None:
if self.null_count == len(self):
return cast(
"cudf.core.column.DatetimeColumn",
column.column_empty(
len(self), dtype=out_dtype, masked=True
),
)
else:
format = datetime.infer_format(
self.apply_boolean_mask(self.notnull()).element_indexing(0)
)
return self._as_datetime_or_timedelta_column(out_dtype, format)
def as_timedelta_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.TimeDeltaColumn":
out_dtype = cudf.api.types.dtype(dtype)
format = "%D days %H:%M:%S"
return self._as_datetime_or_timedelta_column(out_dtype, format)
def as_decimal_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.DecimalBaseColumn":
return libstrings.to_decimal(self, dtype)
def as_string_column(
self, dtype: Dtype, format=None, **kwargs
) -> StringColumn:
return self
@property
def values_host(self) -> np.ndarray:
"""
Return a numpy representation of the StringColumn.
"""
return self.to_pandas().values
@property
def values(self) -> cupy.ndarray:
"""
Return a CuPy representation of the StringColumn.
"""
raise TypeError("String Arrays is not yet implemented in cudf")
def to_pandas(
self,
index: Optional[pd.Index] = None,
nullable: bool = False,
**kwargs,
) -> pd.Series:
if nullable:
pandas_array = pd.StringDtype().__from_arrow__(self.to_arrow())
pd_series = pd.Series(pandas_array, copy=False)
else:
pd_series = self.to_arrow().to_pandas(**kwargs)
if index is not None:
pd_series.index = index
return pd_series
def can_cast_safely(self, to_dtype: Dtype) -> bool:
to_dtype = cudf.api.types.dtype(to_dtype)
if self.dtype == to_dtype:
return True
elif (
to_dtype.kind in {"i", "u"}
and not libstrings.is_integer(self).all()
):
return False
elif to_dtype.kind == "f" and not libstrings.is_float(self).all():
return False
else:
return True
def find_and_replace(
self,
to_replace: ColumnLike,
replacement: ColumnLike,
all_nan: bool = False,
) -> StringColumn:
"""
Return col with *to_replace* replaced with *value*
"""
to_replace_col = column.as_column(to_replace)
replacement_col = column.as_column(replacement)
if type(to_replace_col) != type(replacement_col):
raise TypeError(
f"to_replace and value should be of same types,"
f"got to_replace dtype: {to_replace_col.dtype} and "
f"value dtype: {replacement_col.dtype}"
)
if (
to_replace_col.dtype != self.dtype
and replacement_col.dtype != self.dtype
):
return self.copy()
df = cudf.DataFrame._from_data(
{"old": to_replace_col, "new": replacement_col}
)
df = df.drop_duplicates(subset=["old"], keep="last", ignore_index=True)
if df._data["old"].null_count == 1:
res = self.fillna(
df._data["new"]
.apply_boolean_mask(df._data["old"].isnull())
.element_indexing(0)
)
df = df.dropna(subset=["old"])
else:
res = self
return libcudf.replace.replace(res, df._data["old"], df._data["new"])
def fillna(
self,
fill_value: Any = None,
method: Optional[str] = None,
dtype: Optional[Dtype] = None,
) -> StringColumn:
if fill_value is not None:
if not is_scalar(fill_value):
fill_value = column.as_column(fill_value, dtype=self.dtype)
elif cudf._lib.scalar._is_null_host_scalar(fill_value):
# Trying to fill <NA> with <NA> value? Return copy.
return self.copy(deep=True)
return super().fillna(value=fill_value, dtype="object")
else:
return super().fillna(method=method)
def normalize_binop_value(
self, other
) -> Union[column.ColumnBase, cudf.Scalar]:
if (
isinstance(other, (column.ColumnBase, cudf.Scalar))
and other.dtype == "object"
):
return other
if is_scalar(other):
return cudf.Scalar(other)
return NotImplemented
def _binaryop(
self, other: ColumnBinaryOperand, op: str
) -> "column.ColumnBase":
reflect, op = self._check_reflected_op(op)
# Due to https://github.com/pandas-dev/pandas/issues/46332 we need to
# support binary operations between empty or all null string columns
# and columns of other dtypes, even if those operations would otherwise
# be invalid. For example, you cannot divide strings, but pandas allows
# division between an empty string column and a (nonempty) integer
# column. Ideally we would disable these operators entirely, but until
# the above issue is resolved we cannot avoid this problem.
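        # Illustrative consequence: dividing an all-null string column by an
        # integer column simply returns the string column unchanged below.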
if self.null_count == len(self):
if op in {
"__add__",
"__sub__",
"__mul__",
"__mod__",
"__pow__",
"__truediv__",
"__floordiv__",
}:
return self
elif op in {"__eq__", "__lt__", "__le__", "__gt__", "__ge__"}:
return self.notnull()
elif op == "__ne__":
return self.isnull()
other = self._wrap_binop_normalization(other)
if other is NotImplemented:
return NotImplemented
if isinstance(other, (StringColumn, str, cudf.Scalar)):
if isinstance(other, cudf.Scalar) and other.dtype != "O":
if op in {
"__eq__",
"__ne__",
}:
return column.full(
len(self), op == "__ne__", dtype="bool"
).set_mask(self.mask)
else:
return NotImplemented
if op == "__add__":
if isinstance(other, cudf.Scalar):
other = cast(
StringColumn,
column.full(len(self), other, dtype="object"),
)
# Explicit types are necessary because mypy infers ColumnBase
# rather than StringColumn and sometimes forgets Scalar.
lhs: Union[cudf.Scalar, StringColumn]
rhs: Union[cudf.Scalar, StringColumn]
lhs, rhs = (other, self) if reflect else (self, other)
return cast(
"column.ColumnBase",
libstrings.concatenate(
[lhs, rhs],
sep=cudf.Scalar(""),
na_rep=cudf.Scalar(None, "str"),
),
)
elif op in {
"__eq__",
"__ne__",
"__gt__",
"__lt__",
"__ge__",
"__le__",
"NULL_EQUALS",
}:
lhs, rhs = (other, self) if reflect else (self, other)
return libcudf.binaryop.binaryop(
lhs=lhs, rhs=rhs, op=op, dtype="bool"
)
return NotImplemented
@copy_docstring(column.ColumnBase.view)
def view(self, dtype) -> "cudf.core.column.ColumnBase":
if self.null_count > 0:
raise ValueError(
"Can not produce a view of a string column with nulls"
)
dtype = cudf.api.types.dtype(dtype)
str_byte_offset = self.base_children[0].element_indexing(self.offset)
str_end_byte_offset = self.base_children[0].element_indexing(
self.offset + self.size
)
char_dtype_size = self.base_children[1].dtype.itemsize
n_bytes_to_view = (
str_end_byte_offset - str_byte_offset
) * char_dtype_size
to_view = column.build_column(
self.base_children[1].data,
dtype=self.base_children[1].dtype,
offset=str_byte_offset,
size=n_bytes_to_view,
)
return to_view.view(dtype)
def _get_cols_list(parent_obj, others):
parent_index = (
parent_obj.index if isinstance(parent_obj, cudf.Series) else parent_obj
)
if (
can_convert_to_column(others)
and len(others) > 0
and (
can_convert_to_column(
others.iloc[0]
if isinstance(others, cudf.Series)
else others[0]
)
)
):
"""
If others is a list-like object (in our case lists & tuples)
just another Series/Index, great go ahead with concatenation.
"""
cols_list = [
column.as_column(frame.reindex(parent_index), dtype="str")
if (
parent_index is not None
and isinstance(frame, cudf.Series)
and not frame.index.equals(parent_index)
)
else column.as_column(frame, dtype="str")
for frame in others
]
return cols_list
elif others is not None and not isinstance(others, StringMethods):
if (
parent_index is not None
and isinstance(others, cudf.Series)
and not others.index.equals(parent_index)
):
others = others.reindex(parent_index)
return [column.as_column(others, dtype="str")]
else:
raise TypeError(
"others must be Series, Index, DataFrame, np.ndarrary "
"or list-like (either containing only strings or "
"containing only objects of type Series/Index/"
"np.ndarray[1-dim])"
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/decimal.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
import warnings
from decimal import Decimal
from typing import Any, Optional, Sequence, Union, cast
import cupy as cp
import numpy as np
import pyarrow as pa
import cudf
from cudf import _lib as libcudf
from cudf._lib.strings.convert.convert_fixed_point import (
from_decimal as cpp_from_decimal,
)
from cudf._typing import ColumnBinaryOperand, Dtype
from cudf.api.types import is_integer_dtype, is_scalar
from cudf.core.buffer import as_buffer
from cudf.core.column import ColumnBase, as_column
from cudf.core.dtypes import (
Decimal32Dtype,
Decimal64Dtype,
Decimal128Dtype,
DecimalDtype,
)
from cudf.core.mixins import BinaryOperand
from cudf.utils.utils import pa_mask_buffer_to_mask
from .numerical_base import NumericalBaseColumn
class DecimalBaseColumn(NumericalBaseColumn):
"""Base column for decimal32, decimal64 or decimal128 columns"""
dtype: DecimalDtype
_VALID_BINARY_OPERATIONS = BinaryOperand._SUPPORTED_BINARY_OPERATIONS
def as_decimal_column(
self, dtype: Dtype, **kwargs
) -> Union["DecimalBaseColumn"]:
if (
isinstance(dtype, cudf.core.dtypes.DecimalDtype)
and dtype.scale < self.dtype.scale
):
warnings.warn(
"cuDF truncates when downcasting decimals to a lower scale. "
"To round, use Series.round() or DataFrame.round()."
)
if dtype == self.dtype:
return self
return libcudf.unary.cast(self, dtype)
def as_string_column(
self, dtype: Dtype, format=None, **kwargs
) -> "cudf.core.column.StringColumn":
if len(self) > 0:
return cpp_from_decimal(self)
else:
return cast(
"cudf.core.column.StringColumn", as_column([], dtype="object")
)
def __pow__(self, other):
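        # Integer powers are expanded into repeated multiplication so the
        # decimal precision/scale rules of __mul__ apply at each step; e.g.
        # col ** 3 is evaluated as col * (col * col) (illustrative).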
if isinstance(other, int):
if other == 0:
res = cudf.core.column.full(
size=len(self), fill_value=1, dtype=self.dtype
)
if self.nullable:
res = res.set_mask(self.mask)
return res
elif other < 0:
raise TypeError("Power of negative integers not supported.")
res = self
for _ in range(other - 1):
res = self * res
return res
else:
raise NotImplementedError(
f"__pow__ of types {self.dtype} and {type(other)} is "
"not yet implemented."
)
# Decimals in libcudf don't support truediv, see
# https://github.com/rapidsai/cudf/pull/7435 for explanation.
def __truediv__(self, other):
return self._binaryop(other, "__div__")
def __rtruediv__(self, other):
return self._binaryop(other, "__rdiv__")
def _binaryop(self, other: ColumnBinaryOperand, op: str):
reflect, op = self._check_reflected_op(op)
other = self._wrap_binop_normalization(other)
if other is NotImplemented:
return NotImplemented
lhs, rhs = (other, self) if reflect else (self, other)
# Binary Arithmetics between decimal columns. `Scale` and `precision`
# are computed outside of libcudf
if op in {"__add__", "__sub__", "__mul__", "__div__"}:
output_type = _get_decimal_type(lhs.dtype, rhs.dtype, op)
result = libcudf.binaryop.binaryop(lhs, rhs, op, output_type)
# TODO: Why is this necessary? Why isn't the result's
# precision already set correctly based on output_type?
result.dtype.precision = output_type.precision
elif op in {
"__eq__",
"__ne__",
"__lt__",
"__gt__",
"__le__",
"__ge__",
}:
result = libcudf.binaryop.binaryop(lhs, rhs, op, bool)
else:
raise TypeError(
f"{op} not supported for the following dtypes: "
f"{self.dtype}, {other.dtype}"
)
return result
def fillna(
self,
value: Any = None,
method: Optional[str] = None,
dtype: Optional[Dtype] = None,
):
"""Fill null values with ``value``.
Returns a copy with null filled.
"""
if isinstance(value, (int, Decimal)):
value = cudf.Scalar(value, dtype=self.dtype)
elif (
isinstance(value, DecimalBaseColumn)
or isinstance(value, cudf.core.column.NumericalColumn)
and is_integer_dtype(value.dtype)
):
value = value.astype(self.dtype)
else:
raise TypeError(
"Decimal columns only support using fillna with decimal and "
"integer values"
)
return super().fillna(value=value, method=method)
def normalize_binop_value(self, other):
if isinstance(other, ColumnBase):
if isinstance(other, cudf.core.column.NumericalColumn):
if not is_integer_dtype(other.dtype):
raise TypeError(
"Decimal columns only support binary operations with "
"integer numerical columns."
)
other = other.as_decimal_column(
self.dtype.__class__(self.dtype.__class__.MAX_PRECISION, 0)
)
elif not isinstance(other, DecimalBaseColumn):
return NotImplemented
elif not isinstance(self.dtype, other.dtype.__class__):
# This branch occurs if we have a DecimalBaseColumn of a
# different size (e.g. 64 instead of 32).
if _same_precision_and_scale(self.dtype, other.dtype):
other = other.astype(self.dtype)
return other
if isinstance(other, cudf.Scalar) and isinstance(
# TODO: Should it be possible to cast scalars of other numerical
# types to decimal?
other.dtype,
cudf.core.dtypes.DecimalDtype,
):
if _same_precision_and_scale(self.dtype, other.dtype):
other = other.astype(self.dtype)
return other
elif is_scalar(other) and isinstance(other, (int, Decimal)):
other = Decimal(other)
metadata = other.as_tuple()
precision = max(len(metadata.digits), metadata.exponent)
scale = -metadata.exponent
return cudf.Scalar(
other, dtype=self.dtype.__class__(precision, scale)
)
return NotImplemented
def _decimal_quantile(
self, q: Union[float, Sequence[float]], interpolation: str, exact: bool
) -> ColumnBase:
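        # Nulls are ordered first and then sliced away, so quantiles are taken
        # over the sorted non-null values only.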
quant = [float(q)] if not isinstance(q, (Sequence, np.ndarray)) else q
# get sorted indices and exclude nulls
indices = libcudf.sort.order_by(
[self], [True], "first", stable=True
).slice(self.null_count, len(self))
result = libcudf.quantiles.quantile(
self, quant, interpolation, indices, exact
)
return result._with_type_metadata(self.dtype)
def as_numerical_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.NumericalColumn":
return libcudf.unary.cast(self, dtype)
class Decimal32Column(DecimalBaseColumn):
dtype: Decimal32Dtype
@classmethod
def from_arrow(cls, data: pa.Array):
dtype = Decimal32Dtype.from_arrow(data.type)
mask_buf = data.buffers()[0]
mask = (
mask_buf
if mask_buf is None
else pa_mask_buffer_to_mask(mask_buf, len(data))
)
data_128 = cp.array(np.frombuffer(data.buffers()[1]).view("int32"))
data_32 = data_128[::4].copy()
return cls(
data=as_buffer(data_32.view("uint8")),
size=len(data),
dtype=dtype,
offset=data.offset,
mask=mask,
)
def to_arrow(self):
data_buf_32 = np.array(self.base_data.memoryview()).view("int32")
data_buf_128 = np.empty(len(data_buf_32) * 4, dtype="int32")
# use striding to set the first 32 bits of each 128-bit chunk:
data_buf_128[::4] = data_buf_32
# use striding again to set the remaining bits of each 128-bit chunk:
# 0 for non-negative values, -1 for negative values:
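        # e.g. the 32-bit value -7 becomes the little-endian words
        # [-7, -1, -1, -1], while 7 becomes [7, 0, 0, 0] (illustrative).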
data_buf_128[1::4] = np.piecewise(
data_buf_32, [data_buf_32 < 0], [-1, 0]
)
data_buf_128[2::4] = np.piecewise(
data_buf_32, [data_buf_32 < 0], [-1, 0]
)
data_buf_128[3::4] = np.piecewise(
data_buf_32, [data_buf_32 < 0], [-1, 0]
)
data_buf = pa.py_buffer(data_buf_128)
mask_buf = (
self.base_mask
if self.base_mask is None
else pa.py_buffer(self.base_mask.memoryview())
)
return pa.Array.from_buffers(
type=self.dtype.to_arrow(),
offset=self._offset,
length=self.size,
buffers=[mask_buf, data_buf],
)
def _with_type_metadata(
self: "cudf.core.column.Decimal32Column", dtype: Dtype
) -> "cudf.core.column.Decimal32Column":
if isinstance(dtype, Decimal32Dtype):
self.dtype.precision = dtype.precision
return self
class Decimal128Column(DecimalBaseColumn):
dtype: Decimal128Dtype
@classmethod
def from_arrow(cls, data: pa.Array):
result = cast(Decimal128Dtype, super().from_arrow(data))
result.dtype.precision = data.type.precision
return result
def to_arrow(self):
return super().to_arrow().cast(self.dtype.to_arrow())
def _with_type_metadata(
self: "cudf.core.column.Decimal128Column", dtype: Dtype
) -> "cudf.core.column.Decimal128Column":
if isinstance(dtype, Decimal128Dtype):
self.dtype.precision = dtype.precision
return self
class Decimal64Column(DecimalBaseColumn):
dtype: Decimal64Dtype
def __setitem__(self, key, value):
if isinstance(value, np.integer):
value = int(value)
super().__setitem__(key, value)
@classmethod
def from_arrow(cls, data: pa.Array):
dtype = Decimal64Dtype.from_arrow(data.type)
mask_buf = data.buffers()[0]
mask = (
mask_buf
if mask_buf is None
else pa_mask_buffer_to_mask(mask_buf, len(data))
)
data_128 = cp.array(np.frombuffer(data.buffers()[1]).view("int64"))
data_64 = data_128[::2].copy()
return cls(
data=as_buffer(data_64.view("uint8")),
size=len(data),
dtype=dtype,
offset=data.offset,
mask=mask,
)
def to_arrow(self):
data_buf_64 = np.array(self.base_data.memoryview()).view("int64")
data_buf_128 = np.empty(len(data_buf_64) * 2, dtype="int64")
# use striding to set the first 64 bits of each 128-bit chunk:
data_buf_128[::2] = data_buf_64
# use striding again to set the remaining bits of each 128-bit chunk:
# 0 for non-negative values, -1 for negative values:
data_buf_128[1::2] = np.piecewise(
data_buf_64, [data_buf_64 < 0], [-1, 0]
)
data_buf = pa.py_buffer(data_buf_128)
mask_buf = (
self.base_mask
if self.base_mask is None
else pa.py_buffer(self.base_mask.memoryview())
)
return pa.Array.from_buffers(
type=self.dtype.to_arrow(),
offset=self._offset,
length=self.size,
buffers=[mask_buf, data_buf],
)
@property
def __cuda_array_interface__(self):
raise NotImplementedError(
"Decimals are not yet supported via `__cuda_array_interface__`"
)
def _with_type_metadata(
self: "cudf.core.column.Decimal64Column", dtype: Dtype
) -> "cudf.core.column.Decimal64Column":
if isinstance(dtype, Decimal64Dtype):
self.dtype.precision = dtype.precision
return self
def _get_decimal_type(lhs_dtype, rhs_dtype, op):
"""
Returns the resulting decimal type after calculating
precision & scale when performing the binary operation
`op` for the given dtypes.
For precision & scale calculations see : https://docs.microsoft.com/en-us/sql/t-sql/data-types/precision-scale-and-length-transact-sql
""" # noqa: E501
# This should at some point be hooked up to libcudf's
# binary_operation_fixed_point_scale
p1, p2 = lhs_dtype.precision, rhs_dtype.precision
s1, s2 = lhs_dtype.scale, rhs_dtype.scale
if op in {"__add__", "__sub__"}:
scale = max(s1, s2)
precision = scale + max(p1 - s1, p2 - s2) + 1
if precision > Decimal128Dtype.MAX_PRECISION:
precision = Decimal128Dtype.MAX_PRECISION
scale = Decimal128Dtype.MAX_PRECISION - max(p1 - s1, p2 - s2)
elif op in {"__mul__", "__div__"}:
if op == "__mul__":
scale = s1 + s2
precision = p1 + p2 + 1
else:
scale = max(6, s1 + p2 + 1)
precision = p1 - s1 + s2 + scale
if precision > Decimal128Dtype.MAX_PRECISION:
integral = precision - scale
if integral < 32:
scale = min(scale, Decimal128Dtype.MAX_PRECISION - integral)
elif scale > 6 and integral > 32:
scale = 6
precision = Decimal128Dtype.MAX_PRECISION
else:
raise NotImplementedError()
try:
if isinstance(lhs_dtype, type(rhs_dtype)):
# SCENARIO 1: If `lhs_dtype` & `rhs_dtype` are same, then try to
# see if `precision` & `scale` can be fit into this type.
return lhs_dtype.__class__(precision=precision, scale=scale)
else:
# SCENARIO 2: If `lhs_dtype` & `rhs_dtype` are of different dtypes,
# then try to see if `precision` & `scale` can be fit into the type
# with greater MAX_PRECISION (i.e., the bigger dtype).
if lhs_dtype.MAX_PRECISION >= rhs_dtype.MAX_PRECISION:
return lhs_dtype.__class__(precision=precision, scale=scale)
else:
return rhs_dtype.__class__(precision=precision, scale=scale)
except ValueError:
# Call to _validate fails, which means we need
# to goto SCENARIO 3.
pass
# SCENARIO 3: If either of the above two scenarios fail, then get the
# MAX_PRECISION of `lhs_dtype` & `rhs_dtype` so that we can only check
# and return a dtype that is greater than or equal to input dtype that
# can fit `precision` & `scale`.
max_precision = max(lhs_dtype.MAX_PRECISION, rhs_dtype.MAX_PRECISION)
for decimal_type in (
Decimal32Dtype,
Decimal64Dtype,
Decimal128Dtype,
):
if decimal_type.MAX_PRECISION >= max_precision:
try:
return decimal_type(precision=precision, scale=scale)
except ValueError:
# Call to _validate fails, which means we need
# to try the next dtype
continue
# if we've reached this point, we cannot create a decimal type without
# overflow; raise an informative error
raise ValueError(
f"Performing {op} between columns of type {repr(lhs_dtype)} and "
f"{repr(rhs_dtype)} would result in overflow"
)
def _same_precision_and_scale(lhs: DecimalDtype, rhs: DecimalDtype) -> bool:
return lhs.precision == rhs.precision and lhs.scale == rhs.scale
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/datetime.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
from __future__ import annotations
import datetime
import locale
import re
from locale import nl_langinfo
from typing import Any, Mapping, Optional, Sequence, cast
import numpy as np
import pandas as pd
import pyarrow as pa
import cudf
from cudf import _lib as libcudf
from cudf._typing import (
ColumnBinaryOperand,
DatetimeLikeScalar,
Dtype,
DtypeObj,
ScalarLike,
)
from cudf.api.types import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_scalar,
is_timedelta64_dtype,
)
from cudf.core._compat import PANDAS_GE_220
from cudf.core.buffer import Buffer, cuda_array_interface_wrapper
from cudf.core.column import ColumnBase, as_column, column, string
from cudf.core.column.timedelta import _unit_to_nanoseconds_conversion
from cudf.utils.dtypes import _get_base_dtype
from cudf.utils.utils import _all_bools_with_nulls
if PANDAS_GE_220:
_guess_datetime_format = pd.tseries.api.guess_datetime_format
else:
_guess_datetime_format = pd.core.tools.datetimes.guess_datetime_format
# nanoseconds per time_unit
_dtype_to_format_conversion = {
"datetime64[ns]": "%Y-%m-%d %H:%M:%S.%9f",
"datetime64[us]": "%Y-%m-%d %H:%M:%S.%6f",
"datetime64[ms]": "%Y-%m-%d %H:%M:%S.%3f",
"datetime64[s]": "%Y-%m-%d %H:%M:%S",
}
_DATETIME_SPECIAL_FORMATS = {
"%b",
"%B",
"%A",
"%a",
}
_DATETIME_NAMES = [
nl_langinfo(locale.AM_STR), # type: ignore
nl_langinfo(locale.PM_STR), # type: ignore
nl_langinfo(locale.DAY_1),
nl_langinfo(locale.DAY_2),
nl_langinfo(locale.DAY_3),
nl_langinfo(locale.DAY_4),
nl_langinfo(locale.DAY_5),
nl_langinfo(locale.DAY_6),
nl_langinfo(locale.DAY_7),
nl_langinfo(locale.ABDAY_1),
nl_langinfo(locale.ABDAY_2),
nl_langinfo(locale.ABDAY_3),
nl_langinfo(locale.ABDAY_4),
nl_langinfo(locale.ABDAY_5),
nl_langinfo(locale.ABDAY_6),
nl_langinfo(locale.ABDAY_7),
nl_langinfo(locale.MON_1),
nl_langinfo(locale.MON_2),
nl_langinfo(locale.MON_3),
nl_langinfo(locale.MON_4),
nl_langinfo(locale.MON_5),
nl_langinfo(locale.MON_6),
nl_langinfo(locale.MON_7),
nl_langinfo(locale.MON_8),
nl_langinfo(locale.MON_9),
nl_langinfo(locale.MON_10),
nl_langinfo(locale.MON_11),
nl_langinfo(locale.MON_12),
nl_langinfo(locale.ABMON_1),
nl_langinfo(locale.ABMON_2),
nl_langinfo(locale.ABMON_3),
nl_langinfo(locale.ABMON_4),
nl_langinfo(locale.ABMON_5),
nl_langinfo(locale.ABMON_6),
nl_langinfo(locale.ABMON_7),
nl_langinfo(locale.ABMON_8),
nl_langinfo(locale.ABMON_9),
nl_langinfo(locale.ABMON_10),
nl_langinfo(locale.ABMON_11),
nl_langinfo(locale.ABMON_12),
]
def infer_format(element: str, **kwargs) -> str:
"""
    Infers the datetime format from a string, also taking care of `ms` and `ns`
"""
fmt = _guess_datetime_format(element, **kwargs)
if fmt is not None:
if "%z" in fmt or "%Z" in fmt:
raise NotImplementedError(
"cuDF does not yet support timezone-aware datetimes"
)
return fmt
element_parts = element.split(".")
if len(element_parts) != 2:
raise ValueError("Given date string not likely a datetime.")
    # There is a possibility that the element is of the following format
# '00:00:03.333333 2016-01-01'
second_parts = re.split(r"(\D+)", element_parts[1], maxsplit=1)
subsecond_fmt = ".%" + str(len(second_parts[0])) + "f"
first_part = _guess_datetime_format(element_parts[0], **kwargs)
# For the case where first_part is '00:00:03'
if first_part is None:
tmp = "1970-01-01 " + element_parts[0]
first_part = _guess_datetime_format(tmp, **kwargs).split(" ", 1)[1]
if first_part is None:
raise ValueError("Unable to infer the timestamp format from the data")
if len(second_parts) > 1:
# We may have a non-digit, timezone-like component
# like Z, UTC-3, +01:00
if any(re.search(r"\D", part) for part in second_parts):
raise NotImplementedError(
"cuDF does not yet support timezone-aware datetimes"
)
second_part = "".join(second_parts[1:])
if len(second_part) > 1:
# Only infer if second_parts is not an empty string.
second_part = _guess_datetime_format(second_part, **kwargs)
else:
second_part = ""
try:
fmt = first_part + subsecond_fmt + second_part
except Exception:
raise ValueError("Unable to infer the timestamp format from the data")
return fmt
def _resolve_mixed_dtypes(
lhs: ColumnBinaryOperand, rhs: ColumnBinaryOperand, base_type: str
) -> Dtype:
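    # Illustrative: mixing datetime64[ms] with timedelta64[ns] resolves to the
    # finer unit, e.g. "datetime64[ns]" when base_type is "datetime64".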
units = ["s", "ms", "us", "ns"]
lhs_time_unit = cudf.utils.dtypes.get_time_unit(lhs)
lhs_unit = units.index(lhs_time_unit)
rhs_time_unit = cudf.utils.dtypes.get_time_unit(rhs)
rhs_unit = units.index(rhs_time_unit)
return cudf.dtype(f"{base_type}[{units[max(lhs_unit, rhs_unit)]}]")
def _get_datetime_format(col, dtype, time_unit):
format = _dtype_to_format_conversion.get(dtype.name, "%Y-%m-%d %H:%M:%S")
if format.endswith("f"):
sub_second_res_len = 3
else:
sub_second_res_len = 0
has_nanos = time_unit in {"ns"} and col.get_dt_field("nanosecond").any()
has_micros = (
time_unit in {"ns", "us"} and col.get_dt_field("microsecond").any()
)
has_millis = (
time_unit in {"ns", "us", "ms"}
and col.get_dt_field("millisecond").any()
)
has_seconds = col.get_dt_field("second").any()
has_minutes = col.get_dt_field("minute").any()
has_hours = col.get_dt_field("hour").any()
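    # Illustrative trimming: a datetime64[ns] column carrying only milliseconds
    # narrows "%Y-%m-%d %H:%M:%S.%9f" to "%Y-%m-%d %H:%M:%S.%3f", while a
    # column of pure dates keeps just "%Y-%m-%d".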
if sub_second_res_len:
if has_nanos:
# format should be intact and rest of the
# following conditions shouldn't execute.
pass
elif has_micros:
format = format[:-sub_second_res_len] + "%6f"
elif has_millis:
format = format[:-sub_second_res_len] + "%3f"
elif has_seconds or has_minutes or has_hours:
format = format[:-4]
else:
format = format.split(" ")[0]
else:
if not (has_seconds or has_minutes or has_hours):
format = format.split(" ")[0]
return format
class DatetimeColumn(column.ColumnBase):
"""
A Column implementation for Date-time types.
Parameters
----------
data : Buffer
The datetime values
dtype : np.dtype
The data type
mask : Buffer; optional
The validity mask
"""
_VALID_BINARY_OPERATIONS = {
"__eq__",
"__ne__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__add__",
"__sub__",
"__radd__",
"__rsub__",
}
def __init__(
self,
data: Buffer,
dtype: DtypeObj,
mask: Optional[Buffer] = None,
size: Optional[int] = None, # TODO: make non-optional
offset: int = 0,
null_count: Optional[int] = None,
):
dtype = cudf.dtype(dtype)
if data.size % dtype.itemsize:
raise ValueError("Buffer size must be divisible by element size")
if size is None:
size = data.size // dtype.itemsize
size = size - offset
super().__init__(
data,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
)
if self.dtype.type is not np.datetime64:
raise TypeError(f"{self.dtype} is not a supported datetime type")
self._time_unit, _ = np.datetime_data(self.dtype)
def __contains__(self, item: ScalarLike) -> bool:
try:
item_as_dt64 = np.datetime64(item, self._time_unit)
except ValueError:
# If item cannot be converted to datetime type
# np.datetime64 raises ValueError, hence `item`
# cannot exist in `self`.
return False
return item_as_dt64.astype("int64") in self.as_numerical
@property
def time_unit(self) -> str:
return self._time_unit
@property
def year(self) -> ColumnBase:
return self.get_dt_field("year")
@property
def month(self) -> ColumnBase:
return self.get_dt_field("month")
@property
def day(self) -> ColumnBase:
return self.get_dt_field("day")
@property
def hour(self) -> ColumnBase:
return self.get_dt_field("hour")
@property
def minute(self) -> ColumnBase:
return self.get_dt_field("minute")
@property
def second(self) -> ColumnBase:
return self.get_dt_field("second")
@property
def weekday(self) -> ColumnBase:
return self.get_dt_field("weekday")
@property
def dayofyear(self) -> ColumnBase:
return self.get_dt_field("day_of_year")
@property
def day_of_year(self) -> ColumnBase:
return self.get_dt_field("day_of_year")
def to_pandas(
self,
index: Optional[pd.Index] = None,
nullable: bool = False,
**kwargs,
) -> "cudf.Series":
# `copy=True` workaround until following issue is fixed:
# https://issues.apache.org/jira/browse/ARROW-9772
# Pandas only supports `datetime64[ns]` dtype
# and conversion to this type is necessary to make
# arrow to pandas conversion happen for large values.
return pd.Series(
self.astype("datetime64[ns]").to_arrow(),
copy=True,
dtype=self.dtype,
index=index,
)
@property
def values(self):
"""
Return a CuPy representation of the DateTimeColumn.
"""
raise NotImplementedError(
"DateTime Arrays is not yet implemented in cudf"
)
def get_dt_field(self, field: str) -> ColumnBase:
return libcudf.datetime.extract_datetime_component(self, field)
def ceil(self, freq: str) -> ColumnBase:
return libcudf.datetime.ceil_datetime(self, freq)
def floor(self, freq: str) -> ColumnBase:
return libcudf.datetime.floor_datetime(self, freq)
def round(self, freq: str) -> ColumnBase:
return libcudf.datetime.round_datetime(self, freq)
def normalize_binop_value(self, other: DatetimeLikeScalar) -> ScalarLike:
if isinstance(other, (cudf.Scalar, ColumnBase, cudf.DateOffset)):
return other
tz_error_msg = (
"Cannot perform binary operation on timezone-naive columns"
" and timezone-aware timestamps."
)
if isinstance(other, pd.Timestamp):
if other.tz is not None:
raise NotImplementedError(tz_error_msg)
other = other.to_datetime64()
elif isinstance(other, pd.Timedelta):
other = other.to_timedelta64()
elif isinstance(other, datetime.datetime):
if other.tzinfo is not None:
raise NotImplementedError(tz_error_msg)
other = np.datetime64(other)
elif isinstance(other, datetime.timedelta):
other = np.timedelta64(other)
if isinstance(other, np.datetime64):
if np.isnat(other):
return cudf.Scalar(None, dtype=self.dtype)
other = other.astype(self.dtype)
return cudf.Scalar(other)
elif isinstance(other, np.timedelta64):
other_time_unit = cudf.utils.dtypes.get_time_unit(other)
if other_time_unit not in {"s", "ms", "ns", "us"}:
other = other.astype("timedelta64[s]")
if np.isnat(other):
return cudf.Scalar(None, dtype=other.dtype)
return cudf.Scalar(other)
elif isinstance(other, str):
try:
return cudf.Scalar(other, dtype=self.dtype)
except ValueError:
pass
return NotImplemented
@property
def as_numerical(self) -> "cudf.core.column.NumericalColumn":
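        # Reinterpret the underlying epoch ticks as int64 without copying;
        # mask, offset and size are carried over unchanged.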
return cast(
"cudf.core.column.NumericalColumn",
column.build_column(
data=self.base_data,
dtype=np.int64,
mask=self.base_mask,
offset=self.offset,
size=self.size,
),
)
@property
def __cuda_array_interface__(self) -> Mapping[str, Any]:
output = {
"shape": (len(self),),
"strides": (self.dtype.itemsize,),
"typestr": self.dtype.str,
"data": (self.data_ptr, False),
"version": 1,
}
if self.nullable and self.has_nulls():
# Create a simple Python object that exposes the
# `__cuda_array_interface__` attribute here since we need to modify
# some of the attributes from the numba device array
output["mask"] = cuda_array_interface_wrapper(
ptr=self.mask_ptr,
size=len(self),
owner=self.mask,
readonly=True,
typestr="<t1",
)
return output
def as_datetime_column(self, dtype: Dtype, **kwargs) -> DatetimeColumn:
dtype = cudf.dtype(dtype)
if dtype == self.dtype:
return self
return libcudf.unary.cast(self, dtype=dtype)
def as_timedelta_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.TimeDeltaColumn":
raise TypeError(
f"cannot astype a datetimelike from {self.dtype} to {dtype}"
)
def as_numerical_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.NumericalColumn":
return cast(
"cudf.core.column.NumericalColumn", self.as_numerical.astype(dtype)
)
def as_string_column(
self, dtype: Dtype, format=None, **kwargs
) -> "cudf.core.column.StringColumn":
if format is None:
format = _dtype_to_format_conversion.get(
self.dtype.name, "%Y-%m-%d %H:%M:%S"
)
if cudf.get_option("mode.pandas_compatible"):
format = _get_datetime_format(
self, dtype=self.dtype, time_unit=self.time_unit
)
if format in _DATETIME_SPECIAL_FORMATS:
names = as_column(_DATETIME_NAMES)
else:
names = cudf.core.column.column_empty(
0, dtype="object", masked=False
)
if len(self) > 0:
return string._datetime_to_str_typecast_functions[
cudf.dtype(self.dtype)
](self, format, names)
else:
return cast(
"cudf.core.column.StringColumn",
column.column_empty(0, dtype="object", masked=False),
)
def mean(
self, skipna=None, min_count: int = 0, dtype=np.float64
) -> ScalarLike:
return pd.Timestamp(
self.as_numerical.mean(
skipna=skipna, min_count=min_count, dtype=dtype
),
unit=self.time_unit,
)
def std(
self,
skipna: Optional[bool] = None,
min_count: int = 0,
dtype: Dtype = np.float64,
ddof: int = 1,
) -> pd.Timedelta:
return pd.Timedelta(
self.as_numerical.std(
skipna=skipna, min_count=min_count, dtype=dtype, ddof=ddof
)
* _unit_to_nanoseconds_conversion[self.time_unit],
)
def median(self, skipna: Optional[bool] = None) -> pd.Timestamp:
return pd.Timestamp(
self.as_numerical.median(skipna=skipna), unit=self.time_unit
)
def quantile(
self,
q: np.ndarray,
interpolation: str,
exact: bool,
return_scalar: bool,
) -> ColumnBase:
result = self.as_numerical.quantile(
q=q,
interpolation=interpolation,
exact=exact,
return_scalar=return_scalar,
)
if return_scalar:
return pd.Timestamp(result, unit=self.time_unit)
return result.astype(self.dtype)
def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase:
reflect, op = self._check_reflected_op(op)
other = self._wrap_binop_normalization(other)
if other is NotImplemented:
return NotImplemented
if isinstance(other, cudf.DateOffset):
return other._datetime_binop(self, op, reflect=reflect)
# We check this on `other` before reflection since we already know the
# dtype of `self`.
other_is_timedelta = is_timedelta64_dtype(other.dtype)
other_is_datetime64 = not other_is_timedelta and is_datetime64_dtype(
other.dtype
)
lhs, rhs = (other, self) if reflect else (self, other)
out_dtype = None
if (
op
in {
"__ne__",
"__lt__",
"__gt__",
"__le__",
"__ge__",
}
and other_is_datetime64
):
out_dtype = cudf.dtype(np.bool_)
elif op == "__add__" and other_is_timedelta:
# The only thing we can add to a datetime is a timedelta. This
# operation is symmetric, i.e. we allow `datetime + timedelta` or
# `timedelta + datetime`. Both result in DatetimeColumns.
out_dtype = _resolve_mixed_dtypes(lhs, rhs, "datetime64")
elif op == "__sub__":
# Subtracting a datetime from a datetime results in a timedelta.
if other_is_datetime64:
out_dtype = _resolve_mixed_dtypes(lhs, rhs, "timedelta64")
# We can subtract a timedelta from a datetime, but not vice versa.
# Not only is subtraction antisymmetric (as is normal), it is only
# well-defined if this operation was not invoked via reflection.
elif other_is_timedelta and not reflect:
out_dtype = _resolve_mixed_dtypes(lhs, rhs, "datetime64")
elif op in {
"__eq__",
"NULL_EQUALS",
"__ne__",
}:
out_dtype = cudf.dtype(np.bool_)
if isinstance(other, ColumnBase) and not isinstance(
other, DatetimeColumn
):
result = _all_bools_with_nulls(
self, other, bool_fill_value=op == "__ne__"
)
if cudf.get_option("mode.pandas_compatible"):
result = result.fillna(op == "__ne__")
return result
if out_dtype is None:
return NotImplemented
result = libcudf.binaryop.binaryop(lhs, rhs, op, out_dtype)
if cudf.get_option(
"mode.pandas_compatible"
) and out_dtype == cudf.dtype(np.bool_):
result = result.fillna(op == "__ne__")
return result
def fillna(
self,
fill_value: Any = None,
method: Optional[str] = None,
dtype: Optional[Dtype] = None,
) -> DatetimeColumn:
if fill_value is not None:
if cudf.utils.utils._isnat(fill_value):
return self.copy(deep=True)
if is_scalar(fill_value):
if not isinstance(fill_value, cudf.Scalar):
fill_value = cudf.Scalar(fill_value, dtype=self.dtype)
else:
fill_value = column.as_column(fill_value, nan_as_null=False)
return super().fillna(fill_value, method)
def indices_of(
self, value: ScalarLike
) -> cudf.core.column.NumericalColumn:
value = column.as_column(
pd.to_datetime(value), dtype=self.dtype
).as_numerical
return self.as_numerical.indices_of(value)
@property
def is_unique(self) -> bool:
return self.as_numerical.is_unique
def isin(self, values: Sequence) -> ColumnBase:
return cudf.core.tools.datetimes._isin_datetimelike(self, values)
def can_cast_safely(self, to_dtype: Dtype) -> bool:
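        # A resolution change is treated as safe when the column's extrema,
        # expressed as timedeltas in the source resolution, fit within the
        # int64 range of the target resolution.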
if np.issubdtype(to_dtype, np.datetime64):
to_res, _ = np.datetime_data(to_dtype)
self_res, _ = np.datetime_data(self.dtype)
max_int = np.iinfo(cudf.dtype("int64")).max
max_dist = np.timedelta64(
self.max().astype(cudf.dtype("int64"), copy=False), self_res
)
min_dist = np.timedelta64(
self.min().astype(cudf.dtype("int64"), copy=False), self_res
)
self_delta_dtype = np.timedelta64(0, self_res).dtype
if max_dist <= np.timedelta64(max_int, to_res).astype(
self_delta_dtype
) and min_dist <= np.timedelta64(max_int, to_res).astype(
self_delta_dtype
):
return True
else:
return False
elif to_dtype == cudf.dtype("int64") or to_dtype == cudf.dtype("O"):
# can safely cast to representation, or string
return True
else:
return False
def _with_type_metadata(self, dtype):
if is_datetime64tz_dtype(dtype):
return DatetimeTZColumn(
data=self.base_data,
dtype=dtype,
mask=self.base_mask,
size=self.size,
offset=self.offset,
null_count=self.null_count,
)
return self
class DatetimeTZColumn(DatetimeColumn):
def __init__(
self,
data: Buffer,
dtype: pd.DatetimeTZDtype,
mask: Optional[Buffer] = None,
size: Optional[int] = None,
offset: int = 0,
null_count: Optional[int] = None,
):
super().__init__(
data=data,
dtype=_get_base_dtype(dtype),
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
self._dtype = dtype
def to_pandas(
self,
index: Optional[pd.Index] = None,
nullable: bool = False,
**kwargs,
) -> "cudf.Series":
return self._local_time.to_pandas().dt.tz_localize(
self.dtype.tz, ambiguous="NaT", nonexistent="NaT"
)
def to_arrow(self):
return pa.compute.assume_timezone(
self._local_time.to_arrow(), str(self.dtype.tz)
)
@property
def _utc_time(self):
"""Return UTC time as naive timestamps."""
return DatetimeColumn(
data=self.base_data,
dtype=_get_base_dtype(self.dtype),
mask=self.base_mask,
size=self.size,
offset=self.offset,
null_count=self.null_count,
)
@property
def _local_time(self):
"""Return the local time as naive timestamps."""
from cudf.core._internals.timezones import utc_to_local
return utc_to_local(self, str(self.dtype.tz))
def as_string_column(
self, dtype: Dtype, format=None, **kwargs
) -> "cudf.core.column.StringColumn":
return self._local_time.as_string_column(dtype, format, **kwargs)
def get_dt_field(self, field: str) -> ColumnBase:
return libcudf.datetime.extract_datetime_component(
self._local_time, field
)
def __repr__(self):
# Arrow prints the UTC timestamps, but we want to print the
# local timestamps:
arr = self._local_time.to_arrow().cast(
pa.timestamp(self.dtype.unit, str(self.dtype.tz))
)
return (
f"{object.__repr__(self)}\n"
f"{arr.to_string()}\n"
f"dtype: {self.dtype}"
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/column.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
from __future__ import annotations
import builtins
import pickle
from collections import abc
from functools import cached_property
from itertools import chain
from types import SimpleNamespace
from typing import (
Any,
Dict,
List,
MutableSequence,
Optional,
Sequence,
Tuple,
Union,
cast,
)
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
from numba import cuda
from typing_extensions import Self
import rmm
import cudf
from cudf import _lib as libcudf
from cudf._lib.column import Column
from cudf._lib.null_mask import (
MaskState,
bitmask_allocation_size_bytes,
create_null_mask,
)
from cudf._lib.scalar import as_device_scalar
from cudf._lib.stream_compaction import (
apply_boolean_mask,
distinct_count as cpp_distinct_count,
drop_duplicates,
drop_nulls,
)
from cudf._lib.transform import bools_to_mask
from cudf._lib.types import size_type_dtype
from cudf._typing import ColumnLike, Dtype, ScalarLike
from cudf.api.types import (
_is_non_decimal_numeric_dtype,
_is_pandas_nullable_extension_dtype,
infer_dtype,
is_bool_dtype,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_decimal32_dtype,
is_decimal64_dtype,
is_decimal128_dtype,
is_decimal_dtype,
is_dtype_equal,
is_integer_dtype,
is_interval_dtype,
is_list_dtype,
is_scalar,
is_string_dtype,
is_struct_dtype,
)
from cudf.core._compat import PANDAS_GE_150
from cudf.core.abc import Serializable
from cudf.core.buffer import (
Buffer,
acquire_spill_lock,
as_buffer,
cuda_array_interface_wrapper,
)
from cudf.core.dtypes import (
CategoricalDtype,
IntervalDtype,
ListDtype,
StructDtype,
)
from cudf.core.mixins import BinaryOperand, Reducible
from cudf.errors import MixedTypeError
from cudf.utils.dtypes import (
_maybe_convert_to_default_type,
cudf_dtype_from_pa_type,
get_time_unit,
is_mixed_with_object_dtype,
min_scalar_type,
min_unsigned_type,
np_to_pa_dtype,
pandas_dtypes_alias_to_cudf_alias,
pandas_dtypes_to_np_dtypes,
)
from cudf.utils.utils import _array_ufunc, mask_dtype
if PANDAS_GE_150:
from pandas.core.arrays.arrow.extension_types import ArrowIntervalType
else:
from pandas.core.arrays._arrow_utils import ArrowIntervalType
class ColumnBase(Column, Serializable, BinaryOperand, Reducible):
_VALID_REDUCTIONS = {
"any",
"all",
"max",
"min",
}
def as_frame(self) -> "cudf.core.frame.Frame":
"""
Converts a Column to Frame
"""
return cudf.core.single_column_frame.SingleColumnFrame(
{None: self.copy(deep=False)}
)
def data_array_view(
self, *, mode="write"
) -> "cuda.devicearray.DeviceNDArray":
"""
View the data as a device array object
Parameters
----------
mode : str, default 'write'
Supported values are {'read', 'write'}
If 'write' is passed, a device array object
with readonly flag set to False in CAI is returned.
If 'read' is passed, a device array object
with readonly flag set to True in CAI is returned.
            This also means that if the caller wishes to modify
            the data returned through this view, they must
            pass mode="write"; otherwise, pass mode="read".
Returns
-------
numba.cuda.cudadrv.devicearray.DeviceNDArray
"""
if self.data is not None:
if mode == "read":
obj = cuda_array_interface_wrapper(
ptr=self.data.get_ptr(mode="read"),
size=self.data.size,
owner=self.data,
)
elif mode == "write":
obj = self.data
else:
raise ValueError(f"Unsupported mode: {mode}")
else:
obj = None
return cuda.as_cuda_array(obj).view(self.dtype)
def mask_array_view(
self, *, mode="write"
) -> "cuda.devicearray.DeviceNDArray":
"""
View the mask as a device array
Parameters
----------
mode : str, default 'write'
Supported values are {'read', 'write'}
If 'write' is passed, a device array object
with readonly flag set to False in CAI is returned.
If 'read' is passed, a device array object
with readonly flag set to True in CAI is returned.
            This also means that if the caller wishes to modify
            the data returned through this view, they must
            pass mode="write"; otherwise, pass mode="read".
Returns
-------
numba.cuda.cudadrv.devicearray.DeviceNDArray
"""
if self.mask is not None:
if mode == "read":
obj = cuda_array_interface_wrapper(
ptr=self.mask.get_ptr(mode="read"),
size=self.mask.size,
owner=self.mask,
)
elif mode == "write":
obj = self.mask
else:
raise ValueError(f"Unsupported mode: {mode}")
else:
obj = None
return cuda.as_cuda_array(obj).view(mask_dtype)
def __len__(self) -> int:
return self.size
def __repr__(self):
return (
f"{object.__repr__(self)}\n"
f"{self.to_arrow().to_string()}\n"
f"dtype: {self.dtype}"
)
def to_pandas(
self, index: Optional[pd.Index] = None, **kwargs
) -> pd.Series:
"""Convert object to pandas type.
The default implementation falls back to PyArrow for the conversion.
"""
# This default implementation does not handle nulls in any meaningful
# way, but must consume the parameter to avoid passing it to PyArrow
# (which does not recognize it).
kwargs.pop("nullable", None)
pd_series = self.to_arrow().to_pandas(**kwargs)
if index is not None:
pd_series.index = index
return pd_series
@property
def values_host(self) -> "np.ndarray":
"""
Return a numpy representation of the Column.
"""
if len(self) == 0:
return np.array([], dtype=self.dtype)
if self.has_nulls():
raise ValueError("Column must have no nulls.")
with acquire_spill_lock():
return self.data_array_view(mode="read").copy_to_host()
@property
def values(self) -> "cupy.ndarray":
"""
Return a CuPy representation of the Column.
"""
if len(self) == 0:
return cupy.array([], dtype=self.dtype)
if self.has_nulls():
raise ValueError("Column must have no nulls.")
return cupy.asarray(self.data_array_view(mode="write"))
def find_and_replace(
self,
to_replace: ColumnLike,
replacement: ColumnLike,
all_nan: bool = False,
) -> Self:
raise NotImplementedError
def clip(self, lo: ScalarLike, hi: ScalarLike) -> ColumnBase:
return libcudf.replace.clip(self, lo, hi)
def equals(self, other: ColumnBase, check_dtypes: bool = False) -> bool:
if self is other:
return True
if other is None or len(self) != len(other):
return False
if check_dtypes and (self.dtype != other.dtype):
return False
ret = self._binaryop(other, "NULL_EQUALS")
if ret is NotImplemented:
raise TypeError(f"Cannot compare equality with {type(other)}")
return ret.all()
def all(self, skipna: bool = True) -> bool:
# The skipna argument is only used for numerical columns.
# If all entries are null the result is True, including when the column
# is empty.
if self.null_count == self.size:
return True
return libcudf.reduce.reduce("all", self, dtype=np.bool_)
def any(self, skipna: bool = True) -> bool:
# Early exit for fast cases.
if not skipna and self.has_nulls():
return True
elif skipna and self.null_count == self.size:
return False
return libcudf.reduce.reduce("any", self, dtype=np.bool_)
def dropna(self, drop_nan: bool = False) -> ColumnBase:
# The drop_nan argument is only used for numerical columns.
return drop_nulls([self])[0]._with_type_metadata(self.dtype)
def to_arrow(self) -> pa.Array:
"""Convert to PyArrow Array
Examples
--------
>>> import cudf
>>> col = cudf.core.column.as_column([1, 2, 3, 4])
>>> col.to_arrow()
<pyarrow.lib.Int64Array object at 0x7f886547f830>
[
1,
2,
3,
4
]
"""
return libcudf.interop.to_arrow([self], [("None", self.dtype)])[
"None"
].chunk(0)
@classmethod
def from_arrow(cls, array: pa.Array) -> ColumnBase:
"""
Convert PyArrow Array/ChunkedArray to column
Parameters
----------
array : PyArrow Array/ChunkedArray
Returns
-------
column
Examples
--------
>>> import pyarrow as pa
>>> import cudf
>>> cudf.core.column.ColumnBase.from_arrow(pa.array([1, 2, 3, 4]))
<cudf.core.column.numerical.NumericalColumn object at 0x7f8865497ef0>
"""
if not isinstance(array, (pa.Array, pa.ChunkedArray)):
raise TypeError("array should be PyArrow array or chunked array")
data = pa.table([array], [None])
if (
isinstance(array.type, pa.TimestampType)
and array.type.tz is not None
):
raise NotImplementedError(
"cuDF does not yet support timezone-aware datetimes"
)
if isinstance(array.type, pa.DictionaryType):
indices_table = pa.table(
{
"None": pa.chunked_array(
[chunk.indices for chunk in data["None"].chunks],
type=array.type.index_type,
)
}
)
dictionaries_table = pa.table(
{
"None": pa.chunked_array(
[chunk.dictionary for chunk in data["None"].chunks],
type=array.type.value_type,
)
}
)
codes = libcudf.interop.from_arrow(indices_table)[0]
categories = libcudf.interop.from_arrow(dictionaries_table)[0]
return build_categorical_column(
categories=categories,
codes=codes,
mask=codes.base_mask,
size=codes.size,
ordered=array.type.ordered,
)
elif isinstance(array.type, ArrowIntervalType):
return cudf.core.column.IntervalColumn.from_arrow(array)
result = libcudf.interop.from_arrow(data)[0]
return result._with_type_metadata(cudf_dtype_from_pa_type(array.type))
def _get_mask_as_column(self) -> ColumnBase:
return libcudf.transform.mask_to_bools(
self.base_mask, self.offset, self.offset + len(self)
)
@cached_property
def memory_usage(self) -> int:
n = 0
if self.data is not None:
n += self.data.size
if self.nullable:
n += bitmask_allocation_size_bytes(self.size)
return n
def _fill(
self,
fill_value: ScalarLike,
begin: int,
end: int,
inplace: bool = False,
) -> Optional[Self]:
if end <= begin or begin >= self.size:
return self if inplace else self.copy()
        # Constructing a cuDF scalar can avoid an unnecessary DtoH copy when
        # the scalar is None and `is_valid` is called.
slr = cudf.Scalar(fill_value, dtype=self.dtype)
if not inplace:
return libcudf.filling.fill(self, begin, end, slr.device_value)
if is_string_dtype(self.dtype):
return self._mimic_inplace(
libcudf.filling.fill(self, begin, end, slr.device_value),
inplace=True,
)
if not slr.is_valid() and not self.nullable:
mask = create_null_mask(self.size, state=MaskState.ALL_VALID)
self.set_base_mask(mask)
libcudf.filling.fill_in_place(self, begin, end, slr.device_value)
return self
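    # Sketch of the fill paths above (values are illustrative):
    #
    #     col._fill(0, 2, 5)                # out-of-place: returns a new column
    #     col._fill(0, 2, 5, inplace=True)  # fills rows [2, 5) in place
    #
    # String columns always take the out-of-place path and are swapped back in
    # via ``_mimic_inplace`` because they cannot be filled in place.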
def shift(self, offset: int, fill_value: ScalarLike) -> ColumnBase:
return libcudf.copying.shift(self, offset, fill_value)
@property
def valid_count(self) -> int:
"""Number of non-null values"""
return len(self) - self.null_count
@property
def nullmask(self) -> Buffer:
"""The gpu buffer for the null-mask"""
if not self.nullable:
raise ValueError("Column has no null mask")
return self.mask_array_view(mode="read")
def force_deep_copy(self) -> Self:
"""
A method to create deep copy irrespective of whether
`copy-on-write` is enabled.
"""
result = libcudf.copying.copy_column(self)
return result._with_type_metadata(self.dtype)
def copy(self, deep: bool = True) -> Self:
"""
Makes a copy of the Column.
Parameters
----------
deep : bool, default True
If True, a true physical copy of the column
is made.
If False and `copy_on_write` is False, the same
memory is shared between the buffers of the Column
and changes made to one Column will propagate to
its copy and vice-versa.
If False and `copy_on_write` is True, the same
memory is shared between the buffers of the Column
until there is a write operation being performed on
them.
"""
if deep:
return self.force_deep_copy()
else:
return cast(
Self,
build_column(
data=self.base_data
if self.base_data is None
else self.base_data.copy(deep=False),
dtype=self.dtype,
mask=self.base_mask
if self.base_mask is None
else self.base_mask.copy(deep=False),
size=self.size,
offset=self.offset,
children=tuple(
col.copy(deep=False) for col in self.base_children
),
),
)
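    # Copy semantics sketch (assuming default options):
    #
    #     deep = col.copy()               # new device allocation
    #     shallow = col.copy(deep=False)  # shares buffers with ``col``
    #
    # With ``cudf.set_option("copy_on_write", True)`` the shallow copy only
    # diverges from ``col`` once either side is written to.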
def view(self, dtype: Dtype) -> ColumnBase:
"""
View the data underlying a column as different dtype.
The source column must divide evenly into the size of
the desired data type. Columns with nulls may only be
viewed as dtypes with size equal to source dtype size
Parameters
----------
dtype : NumPy dtype, string
The dtype to view the data as
"""
dtype = cudf.dtype(dtype)
if dtype.kind in ("o", "u", "s"):
raise TypeError(
"Bytes viewed as str without metadata is ambiguous"
)
if self.dtype.itemsize == dtype.itemsize:
return build_column(
self.base_data,
dtype=dtype,
mask=self.base_mask,
size=self.size,
offset=self.offset,
)
else:
if self.null_count > 0:
raise ValueError(
"Can not produce a view of a column with nulls"
)
if (self.size * self.dtype.itemsize) % dtype.itemsize:
raise ValueError(
f"Can not divide {self.size * self.dtype.itemsize}"
+ f" total bytes into {dtype} with size {dtype.itemsize}"
)
# This assertion prevents mypy errors below.
assert self.base_data is not None
start = self.offset * self.dtype.itemsize
end = start + self.size * self.dtype.itemsize
return build_column(self.base_data[start:end], dtype=dtype)
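    # Reinterpretation sketch: an int64 column with no nulls can be viewed as
    # twice as many int32 values, e.g.
    #
    #     as_column([1, 2], dtype="int64").view("int32")  # 4 int32 rows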
def element_indexing(self, index: int):
"""Default implementation for indexing to an element
Raises
------
``IndexError`` if out-of-bound
"""
idx = np.int32(index)
if idx < 0:
idx = len(self) + idx
if idx > len(self) - 1 or idx < 0:
raise IndexError("single positional indexer is out-of-bounds")
result = libcudf.copying.get_element(self, idx).value
if cudf.get_option("mode.pandas_compatible"):
if isinstance(result, np.datetime64):
return pd.Timestamp(result)
elif isinstance(result, np.timedelta64):
return pd.Timedelta(result)
return result
def slice(
self, start: int, stop: int, stride: Optional[int] = None
) -> Self:
stride = 1 if stride is None else stride
if start < 0:
start = start + len(self)
if stop < 0 and not (stride < 0 and stop == -1):
stop = stop + len(self)
if (stride > 0 and start >= stop) or (stride < 0 and start <= stop):
return cast(Self, column_empty(0, self.dtype, masked=True))
# compute mask slice
if stride == 1:
return libcudf.copying.column_slice(self, [start, stop])[
0
]._with_type_metadata(self.dtype)
else:
# Need to create a gather map for given slice with stride
gather_map = arange(
start=start,
stop=stop,
step=stride,
dtype=cudf.dtype(np.int32),
)
return self.take(gather_map)
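    # Sketch: ``col.slice(1, 7)`` uses libcudf's contiguous slice, while
    # ``col.slice(1, 7, 2)`` builds a gather map for rows 1, 3 and 5.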
def __setitem__(self, key: Any, value: Any):
"""
Set the value of ``self[key]`` to ``value``.
If ``value`` and ``self`` are of different types, ``value`` is coerced
to ``self.dtype``. Assumes ``self`` and ``value`` are index-aligned.
"""
# Normalize value to scalar/column
value_normalized = (
cudf.Scalar(value, dtype=self.dtype)
if is_scalar(value)
else as_column(value, dtype=self.dtype)
)
out: Optional[ColumnBase] # If None, no need to perform mimic inplace.
if isinstance(key, slice):
out = self._scatter_by_slice(key, value_normalized)
else:
key = as_column(key)
if not isinstance(key, cudf.core.column.NumericalColumn):
raise ValueError(f"Invalid scatter map type {key.dtype}.")
out = self._scatter_by_column(key, value_normalized)
if out:
self._mimic_inplace(out, inplace=True)
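    # Dispatch sketch for ``__setitem__`` (illustrative values):
    #
    #     col[2:5] = 7            # slice key    -> _scatter_by_slice
    #     col[[0, 3]] = 7         # integer keys -> _scatter_by_column (scatter)
    #     col[bool_mask_col] = 7  # boolean mask -> _scatter_by_column (mask scatter)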
def _wrap_binop_normalization(self, other):
if cudf.utils.utils.is_na_like(other):
return cudf.Scalar(other, dtype=self.dtype)
if isinstance(other, np.ndarray) and other.ndim == 0:
# Try and maintain the dtype
other = other.dtype.type(other.item())
return self.normalize_binop_value(other)
def _scatter_by_slice(
self,
key: builtins.slice,
value: Union[cudf.core.scalar.Scalar, ColumnBase],
) -> Optional[Self]:
"""If this function returns None, it's either a no-op (slice is empty),
or the inplace replacement is already performed (fill-in-place).
"""
start, stop, step = key.indices(len(self))
if start >= stop:
return None
num_keys = len(range(start, stop, step))
self._check_scatter_key_length(num_keys, value)
if step == 1 and not isinstance(
self, (cudf.core.column.StructColumn, cudf.core.column.ListColumn)
):
# NOTE: List & Struct dtypes aren't supported by both
# inplace & out-of-place fill. Hence we need to use scatter for
# these two types.
if isinstance(value, cudf.core.scalar.Scalar):
return self._fill(value, start, stop, inplace=True)
else:
return libcudf.copying.copy_range(
value, self, 0, num_keys, start, stop, False
)
# step != 1, create a scatter map with arange
scatter_map = arange(
start=start,
stop=stop,
step=step,
dtype=cudf.dtype(np.int32),
)
return self._scatter_by_column(scatter_map, value)
def _scatter_by_column(
self,
key: cudf.core.column.NumericalColumn,
value: Union[cudf.core.scalar.Scalar, ColumnBase],
) -> Self:
if is_bool_dtype(key.dtype):
# `key` is boolean mask
if len(key) != len(self):
raise ValueError(
"Boolean mask must be of same length as column"
)
if isinstance(value, ColumnBase) and len(self) == len(value):
# Both value and key are aligned to self. Thus, the values
# corresponding to the false values in key should be
# ignored.
value = value.apply_boolean_mask(key)
# After applying boolean mask, the length of value equals
# the number of elements to scatter, we can skip computing
# the sum of ``key`` below.
num_keys = len(value)
else:
                # Compute the number of elements to scatter by summing all
# `True`s in the boolean mask.
num_keys = key.sum()
else:
# `key` is integer scatter map
num_keys = len(key)
self._check_scatter_key_length(num_keys, value)
if is_bool_dtype(key.dtype):
return libcudf.copying.boolean_mask_scatter([value], [self], key)[
0
]._with_type_metadata(self.dtype)
else:
return libcudf.copying.scatter([value], key, [self])[
0
]._with_type_metadata(self.dtype)
def _check_scatter_key_length(
self, num_keys: int, value: Union[cudf.core.scalar.Scalar, ColumnBase]
):
"""`num_keys` is the number of keys to scatter. Should equal to the
number of rows in ``value`` if ``value`` is a column.
"""
if isinstance(value, ColumnBase):
if len(value) != num_keys:
msg = (
f"Size mismatch: cannot set value "
f"of size {len(value)} to indexing result of size "
f"{num_keys}"
)
raise ValueError(msg)
def fillna(
self,
value: Any = None,
method: Optional[str] = None,
dtype: Optional[Dtype] = None,
) -> Self:
"""Fill null values with ``value``.
Returns a copy with null filled.
"""
return libcudf.replace.replace_nulls(
input_col=self, replacement=value, method=method, dtype=dtype
)._with_type_metadata(self.dtype)
def isnull(self) -> ColumnBase:
"""Identify missing values in a Column."""
result = libcudf.unary.is_null(self)
if self.dtype.kind == "f":
# Need to consider `np.nan` values in case
# of a float column
result = result | libcudf.unary.is_nan(self)
return result
def notnull(self) -> ColumnBase:
"""Identify non-missing values in a Column."""
result = libcudf.unary.is_valid(self)
if self.dtype.kind == "f":
# Need to consider `np.nan` values in case
# of a float column
result = result & libcudf.unary.is_non_nan(self)
return result
def indices_of(
self, value: ScalarLike | Self
) -> cudf.core.column.NumericalColumn:
"""
Find locations of value in the column
Parameters
----------
value
Scalar to look for (cast to dtype of column), or a length-1 column
Returns
-------
Column of indices that match value
"""
if not isinstance(value, ColumnBase):
value = as_column([value], dtype=self.dtype)
else:
assert len(value) == 1
mask = libcudf.search.contains(value, self)
return apply_boolean_mask(
[arange(0, len(self), dtype=size_type_dtype)], mask
)[0]
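    # Sketch: ``as_column([3, 1, 3]).indices_of(3)`` is expected to yield the
    # index column ``[0, 2]``.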
def _find_first_and_last(self, value: ScalarLike) -> Tuple[int, int]:
indices = self.indices_of(value)
if n := len(indices):
return (
indices.element_indexing(0),
indices.element_indexing(n - 1),
)
else:
raise ValueError(f"Value {value} not found in column")
def find_first_value(self, value: ScalarLike) -> int:
"""
Return index of first value that matches
Parameters
----------
value
Value to search for (cast to dtype of column)
Returns
-------
Index of value
Raises
------
ValueError if value is not found
"""
first, _ = self._find_first_and_last(value)
return first
def find_last_value(self, value: ScalarLike) -> int:
"""
Return index of last value that matches
Parameters
----------
value
Value to search for (cast to dtype of column)
Returns
-------
Index of value
Raises
------
ValueError if value is not found
"""
_, last = self._find_first_and_last(value)
return last
def append(self, other: ColumnBase) -> ColumnBase:
return concat_columns([self, as_column(other)])
def quantile(
self,
q: np.ndarray,
interpolation: str,
exact: bool,
return_scalar: bool,
) -> ColumnBase:
raise TypeError(f"cannot perform quantile with type {self.dtype}")
def take(
self, indices: ColumnBase, nullify: bool = False, check_bounds=True
) -> Self:
"""Return Column by taking values from the corresponding *indices*.
Skip bounds checking if check_bounds is False.
Set rows to null for all out of bound indices if nullify is `True`.
"""
# Handle zero size
if indices.size == 0:
return cast(Self, column_empty_like(self, newsize=0))
        # TODO: For performance, the check and conversion of the gather map
        # should be done by the caller. This check will be removed in a future
        # release.
if not is_integer_dtype(indices.dtype):
indices = indices.astype(libcudf.types.size_type_dtype)
if not libcudf.copying._gather_map_is_valid(
indices, len(self), check_bounds, nullify
):
raise IndexError("Gather map index is out of bounds.")
return libcudf.copying.gather([self], indices, nullify=nullify)[
0
]._with_type_metadata(self.dtype)
def isin(self, values: Sequence) -> ColumnBase:
"""Check whether values are contained in the Column.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a TypeError. Instead, turn a single string into a list
of one element.
Returns
-------
result: Column
Column of booleans indicating if each element is in values.
"""
try:
lhs, rhs = self._process_values_for_isin(values)
res = lhs._isin_earlystop(rhs)
if res is not None:
return res
except ValueError:
# pandas functionally returns all False when cleansing via
# typecasting fails
return full(len(self), False, dtype="bool")
return lhs._obtain_isin_result(rhs)
def _process_values_for_isin(
self, values: Sequence
) -> Tuple[ColumnBase, ColumnBase]:
"""
Helper function for `isin` which pre-process `values` based on `self`.
"""
lhs = self
rhs = as_column(values, nan_as_null=False)
if lhs.null_count == len(lhs):
lhs = lhs.astype(rhs.dtype)
elif rhs.null_count == len(rhs):
rhs = rhs.astype(lhs.dtype)
return lhs, rhs
def _isin_earlystop(self, rhs: ColumnBase) -> Union[ColumnBase, None]:
"""
Helper function for `isin` which determines possibility of
early-stopping or not.
"""
if self.dtype != rhs.dtype:
if self.null_count and rhs.null_count:
return self.isnull()
else:
return cudf.core.column.full(len(self), False, dtype="bool")
elif self.null_count == 0 and (rhs.null_count == len(rhs)):
return cudf.core.column.full(len(self), False, dtype="bool")
else:
return None
def _obtain_isin_result(self, rhs: ColumnBase) -> ColumnBase:
"""
Helper function for `isin` which merges `self` & `rhs`
to determine what values of `rhs` exist in `self`.
"""
ldf = cudf.DataFrame({"x": self, "orig_order": arange(len(self))})
rdf = cudf.DataFrame(
{"x": rhs, "bool": full(len(rhs), True, dtype="bool")}
)
res = ldf.merge(rdf, on="x", how="left").sort_values(by="orig_order")
res = res.drop_duplicates(subset="orig_order", ignore_index=True)
return res._data["bool"].fillna(False)
def as_mask(self) -> Buffer:
"""Convert booleans to bitmask
Returns
-------
Buffer
"""
if self.has_nulls():
raise ValueError("Column must have no nulls.")
return bools_to_mask(self)
@property
def is_unique(self) -> bool:
return self.distinct_count(dropna=False) == len(self)
@property
def is_monotonic_increasing(self) -> bool:
return not self.has_nulls() and libcudf.sort.is_sorted(
[self], [True], None
)
@property
def is_monotonic_decreasing(self) -> bool:
return not self.has_nulls() and libcudf.sort.is_sorted(
[self], [False], None
)
def sort_values(
self: ColumnBase,
ascending: bool = True,
na_position: str = "last",
) -> ColumnBase:
return libcudf.sort.sort(
[self], column_order=[ascending], null_precedence=[na_position]
)[0]
def distinct_count(self, dropna: bool = True) -> int:
try:
return self._distinct_count[dropna]
except KeyError:
self._distinct_count[dropna] = cpp_distinct_count(
self, ignore_nulls=dropna
)
return self._distinct_count[dropna]
def can_cast_safely(self, to_dtype: Dtype) -> bool:
raise NotImplementedError()
def astype(self, dtype: Dtype, **kwargs) -> ColumnBase:
if self.dtype == dtype:
return self
if is_categorical_dtype(dtype):
return self.as_categorical_column(dtype)
if (
isinstance(dtype, str)
and dtype in pandas_dtypes_alias_to_cudf_alias
):
if cudf.get_option("mode.pandas_compatible"):
raise NotImplementedError("not supported")
else:
dtype = pandas_dtypes_alias_to_cudf_alias[dtype]
elif _is_pandas_nullable_extension_dtype(dtype) and cudf.get_option(
"mode.pandas_compatible"
):
raise NotImplementedError("not supported")
else:
dtype = pandas_dtypes_to_np_dtypes.get(dtype, dtype)
if _is_non_decimal_numeric_dtype(dtype):
return self.as_numerical_column(dtype, **kwargs)
elif is_categorical_dtype(dtype):
return self.as_categorical_column(dtype)
elif cudf.dtype(dtype).type in {
np.str_,
np.object_,
str,
}:
if cudf.get_option("mode.pandas_compatible") and np.dtype(
dtype
).type in {np.object_}:
raise ValueError(
f"Casting to {dtype} is not supported, use "
"`.astype('str')` instead."
)
return self.as_string_column(dtype, **kwargs)
elif is_list_dtype(dtype):
if not self.dtype == dtype:
raise NotImplementedError(
"Casting list columns not currently supported"
)
return self
elif is_struct_dtype(dtype):
if not self.dtype == dtype:
raise NotImplementedError(
"Casting struct columns not currently supported"
)
return self
elif is_interval_dtype(self.dtype):
return self.as_interval_column(dtype, **kwargs)
elif is_decimal_dtype(dtype):
return self.as_decimal_column(dtype, **kwargs)
elif np.issubdtype(cast(Any, dtype), np.datetime64):
return self.as_datetime_column(dtype, **kwargs)
elif np.issubdtype(cast(Any, dtype), np.timedelta64):
return self.as_timedelta_column(dtype, **kwargs)
else:
return self.as_numerical_column(dtype, **kwargs)
def as_categorical_column(self, dtype) -> ColumnBase:
if isinstance(dtype, (cudf.CategoricalDtype, pd.CategoricalDtype)):
ordered = dtype.ordered
else:
ordered = False
# Re-label self w.r.t. the provided categories
if (
isinstance(dtype, cudf.CategoricalDtype)
and dtype._categories is not None
) or (
isinstance(dtype, pd.CategoricalDtype)
and dtype.categories is not None
):
labels = self._label_encoding(cats=as_column(dtype.categories))
return build_categorical_column(
categories=as_column(dtype.categories),
codes=labels,
mask=self.mask,
ordered=dtype.ordered,
)
# Categories must be unique and sorted in ascending order.
cats = self.unique().sort_values().astype(self.dtype)
label_dtype = min_unsigned_type(len(cats))
labels = self._label_encoding(
cats=cats, dtype=label_dtype, na_sentinel=cudf.Scalar(1)
)
# columns include null index in factorization; remove:
if self.has_nulls():
cats = cats.dropna(drop_nan=False)
min_type = min_unsigned_type(len(cats), 8)
if cudf.dtype(min_type).itemsize < labels.dtype.itemsize:
labels = labels.astype(min_type)
return build_categorical_column(
categories=cats,
codes=labels,
mask=self.mask,
ordered=ordered,
)
def as_numerical_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.NumericalColumn":
raise NotImplementedError
def as_datetime_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.DatetimeColumn":
raise NotImplementedError
def as_interval_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.IntervalColumn":
raise NotImplementedError
def as_timedelta_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.TimeDeltaColumn":
raise NotImplementedError
def as_string_column(
self, dtype: Dtype, format=None, **kwargs
) -> "cudf.core.column.StringColumn":
raise NotImplementedError
def as_decimal_column(
self, dtype: Dtype, **kwargs
) -> Union["cudf.core.column.decimal.DecimalBaseColumn"]:
raise NotImplementedError
def as_decimal128_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.Decimal128Column":
raise NotImplementedError
def as_decimal64_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.Decimal64Column":
raise NotImplementedError
def as_decimal32_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.Decimal32Column":
raise NotImplementedError
def apply_boolean_mask(self, mask) -> ColumnBase:
mask = as_column(mask)
if not is_bool_dtype(mask.dtype):
raise ValueError("boolean_mask is not boolean type.")
return apply_boolean_mask([self], mask)[0]._with_type_metadata(
self.dtype
)
def argsort(
self, ascending: bool = True, na_position: str = "last"
) -> "cudf.core.column.NumericalColumn":
return libcudf.sort.order_by(
[self], [ascending], na_position, stable=True
)
def __arrow_array__(self, type=None):
raise TypeError(
"Implicit conversion to a host PyArrow Array via __arrow_array__ "
"is not allowed, To explicitly construct a PyArrow Array, "
"consider using .to_arrow()"
)
@property
def __cuda_array_interface__(self):
raise NotImplementedError(
f"dtype {self.dtype} is not yet supported via "
"`__cuda_array_interface__`"
)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return _array_ufunc(self, ufunc, method, inputs, kwargs)
def searchsorted(
self,
value,
side: str = "left",
ascending: bool = True,
na_position: str = "last",
) -> Self:
if not isinstance(value, ColumnBase) or value.dtype != self.dtype:
raise ValueError(
"Column searchsorted expects values to be column of same dtype"
)
return libcudf.search.search_sorted(
[self],
[value],
side=side,
ascending=ascending,
na_position=na_position,
)
def unique(self) -> ColumnBase:
"""
Get unique values in the data
"""
return drop_duplicates([self], keep="first")[0]._with_type_metadata(
self.dtype
)
def serialize(self) -> Tuple[dict, list]:
# data model:
# Serialization produces a nested metadata "header" and a flattened
# list of memoryviews/buffers that reference data (frames). Each
# header advertises a frame_count slot which indicates how many
# frames deserialization will consume. The class used to construct
# an object is named under the key "type-serialized" to match with
# Dask's serialization protocol (see
# distributed.protocol.serialize). Since column dtypes may either be
# cudf native or foreign some special-casing is required here for
# serialization.
header: Dict[Any, Any] = {}
frames = []
header["type-serialized"] = pickle.dumps(type(self))
try:
dtype, dtype_frames = self.dtype.serialize()
header["dtype"] = dtype
frames.extend(dtype_frames)
header["dtype-is-cudf-serialized"] = True
except AttributeError:
header["dtype"] = pickle.dumps(self.dtype)
header["dtype-is-cudf-serialized"] = False
if self.data is not None:
data_header, data_frames = self.data.serialize()
header["data"] = data_header
frames.extend(data_frames)
if self.mask is not None:
mask_header, mask_frames = self.mask.serialize()
header["mask"] = mask_header
frames.extend(mask_frames)
if self.children:
child_headers, child_frames = zip(
*(c.serialize() for c in self.children)
)
header["subheaders"] = list(child_headers)
frames.extend(chain(*child_frames))
header["size"] = self.size
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header: dict, frames: list) -> ColumnBase:
def unpack(header, frames) -> Tuple[Any, list]:
count = header["frame_count"]
klass = pickle.loads(header["type-serialized"])
obj = klass.deserialize(header, frames[:count])
return obj, frames[count:]
assert header["frame_count"] == len(frames), (
f"Deserialization expected {header['frame_count']} frames, "
f"but received {len(frames)}"
)
if header["dtype-is-cudf-serialized"]:
dtype, frames = unpack(header["dtype"], frames)
else:
dtype = pickle.loads(header["dtype"])
if "data" in header:
data, frames = unpack(header["data"], frames)
else:
data = None
if "mask" in header:
mask, frames = unpack(header["mask"], frames)
else:
mask = None
children = []
if "subheaders" in header:
for h in header["subheaders"]:
child, frames = unpack(h, frames)
children.append(child)
assert len(frames) == 0, "Deserialization did not consume all frames"
return build_column(
data=data,
dtype=dtype,
mask=mask,
size=header.get("size", None),
children=tuple(children),
)
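    # Round-trip sketch of the serialization protocol above:
    #
    #     header, frames = col.serialize()
    #     col2 = type(col).deserialize(header, frames)
    #
    # Dask/distributed uses the pickled class stored under "type-serialized"
    # to pick the right ``deserialize`` implementation.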
def unary_operator(self, unaryop: str):
raise TypeError(
f"Operation {unaryop} not supported for dtype {self.dtype}."
)
def normalize_binop_value(
self, other: ScalarLike
) -> Union[ColumnBase, ScalarLike]:
raise NotImplementedError
def _minmax(self, skipna: Optional[bool] = None):
result_col = self._process_for_reduction(skipna=skipna)
if isinstance(result_col, ColumnBase):
return libcudf.reduce.minmax(result_col)
return result_col
def _reduce(
self,
op: str,
skipna: Optional[bool] = None,
min_count: int = 0,
*args,
**kwargs,
) -> ScalarLike:
"""Compute {op} of column values.
skipna : bool
Whether or not na values must be skipped.
min_count : int, default 0
The minimum number of entries for the reduction, otherwise the
reduction returns NaN.
"""
preprocessed = self._process_for_reduction(
skipna=skipna, min_count=min_count
)
if isinstance(preprocessed, ColumnBase):
return libcudf.reduce.reduce(op, preprocessed, **kwargs)
return preprocessed
@property
def contains_na_entries(self) -> bool:
return self.null_count != 0
def _process_for_reduction(
self, skipna: Optional[bool] = None, min_count: int = 0
) -> Union[ColumnBase, ScalarLike]:
skipna = True if skipna is None else skipna
if skipna:
if self.has_nulls():
result_col = self.dropna()
else:
if self.has_nulls():
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
result_col = self
# TODO: If and when pandas decides to validate that `min_count` >= 0 we
# should insert comparable behavior.
# https://github.com/pandas-dev/pandas/issues/50022
if min_count > 0:
valid_count = len(result_col) - result_col.null_count
if valid_count < min_count:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
return result_col
def _reduction_result_dtype(self, reduction_op: str) -> Dtype:
"""
Determine the correct dtype to pass to libcudf based on
the input dtype, data dtype, and specific reduction op
"""
return self.dtype
def _with_type_metadata(self: ColumnBase, dtype: Dtype) -> ColumnBase:
"""
        Copies the type metadata implied by ``dtype`` onto ``self``, returning
        a new column. When ``self`` is a nested column, this is applied
        recursively to the children of ``self``.
"""
return self
def _label_encoding(
self,
cats: ColumnBase,
dtype: Optional[Dtype] = None,
na_sentinel: Optional[ScalarLike] = None,
):
"""
Convert each value in `self` into an integer code, with `cats`
providing the mapping between codes and values.
Examples
--------
>>> from cudf.core.column import as_column
>>> col = as_column(['foo', 'bar', 'foo', 'baz'])
>>> cats = as_column(['foo', 'bar', 'baz'])
>>> col._label_encoding(cats)
<cudf.core.column.numerical.NumericalColumn object at 0x7f99bf3155c0>
[
0,
1,
0,
2
]
dtype: int8
>>> cats = as_column(['foo', 'bar'])
>>> col._label_encoding(cats)
<cudf.core.column.numerical.NumericalColumn object at 0x7f99bfde0e40>
[
0,
1,
0,
-1
]
dtype: int8
"""
from cudf._lib.join import join as cpp_join
if na_sentinel is None or na_sentinel.value is cudf.NA:
na_sentinel = cudf.Scalar(-1)
def _return_sentinel_column():
return cudf.core.column.full(
size=len(self), fill_value=na_sentinel, dtype=dtype
)
if dtype is None:
dtype = min_scalar_type(max(len(cats), na_sentinel), 8)
if is_mixed_with_object_dtype(self, cats):
return _return_sentinel_column()
try:
# Where there is a type-cast failure, we have
# to catch the exception and return encoded labels
# with na_sentinel values as there would be no corresponding
# encoded values of cats in self.
cats = cats.astype(self.dtype)
except ValueError:
return _return_sentinel_column()
left_gather_map, right_gather_map = cpp_join(
[self], [cats], how="left"
)
codes = libcudf.copying.gather(
[arange(len(cats), dtype=dtype)], right_gather_map, nullify=True
)
del right_gather_map
# reorder `codes` so that its values correspond to the
# values of `self`:
(codes,) = libcudf.sort.sort_by_key(
codes, [left_gather_map], [True], ["last"], stable=True
)
return codes.fillna(na_sentinel.value)
def column_empty_like(
column: ColumnBase,
dtype: Optional[Dtype] = None,
masked: bool = False,
newsize: Optional[int] = None,
) -> ColumnBase:
"""Allocate a new column like the given *column*"""
if dtype is None:
dtype = column.dtype
row_count = len(column) if newsize is None else newsize
if (
hasattr(column, "dtype")
and is_categorical_dtype(column.dtype)
and dtype == column.dtype
):
catcolumn = cast("cudf.core.column.CategoricalColumn", column)
codes = column_empty_like(
catcolumn.codes, masked=masked, newsize=newsize
)
return build_column(
data=None,
dtype=dtype,
mask=codes.base_mask,
children=(codes,),
size=codes.size,
)
return column_empty(row_count, dtype, masked)
def column_empty_like_same_mask(
column: ColumnBase, dtype: Dtype
) -> ColumnBase:
"""Create a new empty Column with the same length and the same mask.
Parameters
----------
dtype : np.dtype like
The dtype of the data buffer.
"""
result = column_empty_like(column, dtype)
if column.nullable:
result = result.set_mask(column.mask)
return result
def column_empty(
row_count: int, dtype: Dtype = "object", masked: bool = False
) -> ColumnBase:
"""Allocate a new column like the given row_count and dtype."""
dtype = cudf.dtype(dtype)
children = () # type: Tuple[ColumnBase, ...]
if is_struct_dtype(dtype):
data = None
children = tuple(
column_empty(row_count, field_dtype)
for field_dtype in dtype.fields.values()
)
elif is_list_dtype(dtype):
data = None
children = (
full(row_count + 1, 0, dtype=libcudf.types.size_type_dtype),
column_empty(row_count, dtype=dtype.element_type),
)
elif is_categorical_dtype(dtype):
data = None
children = (
build_column(
data=as_buffer(
rmm.DeviceBuffer(
size=row_count
* cudf.dtype(libcudf.types.size_type_dtype).itemsize
)
),
dtype=libcudf.types.size_type_dtype,
),
)
elif dtype.kind in "OU" and not is_decimal_dtype(dtype):
data = None
children = (
full(row_count + 1, 0, dtype=libcudf.types.size_type_dtype),
build_column(
data=as_buffer(
rmm.DeviceBuffer(
size=row_count * cudf.dtype("int8").itemsize
)
),
dtype="int8",
),
)
else:
data = as_buffer(rmm.DeviceBuffer(size=row_count * dtype.itemsize))
if masked:
mask = create_null_mask(row_count, state=MaskState.ALL_NULL)
else:
mask = None
return build_column(
data, dtype, mask=mask, size=row_count, children=children
)
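# Sketch: ``column_empty(3, dtype="int64", masked=True)`` allocates an
# uninitialized 3-row int64 column whose null mask marks every row null.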
def build_column(
data: Union[Buffer, None],
dtype: Dtype,
*,
size: Optional[int] = None,
mask: Optional[Buffer] = None,
offset: int = 0,
null_count: Optional[int] = None,
children: Tuple[ColumnBase, ...] = (),
) -> ColumnBase:
"""
Build a Column of the appropriate type from the given parameters
Parameters
----------
data : Buffer
The data buffer (can be None if constructing certain Column
types like StringColumn, ListColumn, or CategoricalColumn)
dtype
The dtype associated with the Column to construct
mask : Buffer, optional
The mask buffer
size : int, optional
offset : int, optional
children : tuple, optional
"""
dtype = cudf.dtype(dtype)
if _is_non_decimal_numeric_dtype(dtype):
assert data is not None
col = cudf.core.column.NumericalColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
return col
if is_categorical_dtype(dtype):
if not len(children) == 1:
raise ValueError(
"Must specify exactly one child column for CategoricalColumn"
)
if not isinstance(children[0], ColumnBase):
raise TypeError("children must be a tuple of Columns")
return cudf.core.column.CategoricalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
elif dtype.type is np.datetime64:
if data is None:
raise TypeError("Must specify data buffer")
return cudf.core.column.DatetimeColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif is_datetime64tz_dtype(dtype):
if data is None:
raise TypeError("Must specify data buffer")
return cudf.core.column.datetime.DatetimeTZColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type is np.timedelta64:
if data is None:
raise TypeError("Must specify data buffer")
return cudf.core.column.TimeDeltaColumn(
data=data,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
)
elif dtype.type in (np.object_, np.str_):
return cudf.core.column.StringColumn(
mask=mask,
size=size,
offset=offset,
children=children,
null_count=null_count,
)
elif is_list_dtype(dtype):
return cudf.core.column.ListColumn(
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
elif is_interval_dtype(dtype):
return cudf.core.column.IntervalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
children=children,
null_count=null_count,
)
elif is_struct_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.StructColumn(
data=data,
dtype=dtype,
size=size,
offset=offset,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal64_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal64Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal32_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal32Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_decimal128_dtype(dtype):
if size is None:
raise TypeError("Must specify size")
return cudf.core.column.Decimal128Column(
data=data,
size=size,
offset=offset,
dtype=dtype,
mask=mask,
null_count=null_count,
children=children,
)
elif is_interval_dtype(dtype):
return cudf.core.column.IntervalColumn(
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
else:
raise TypeError(f"Unrecognized dtype: {dtype}")
def build_categorical_column(
categories: ColumnBase,
codes: ColumnBase,
mask: Optional[Buffer] = None,
size: Optional[int] = None,
offset: int = 0,
null_count: Optional[int] = None,
ordered: bool = False,
) -> "cudf.core.column.CategoricalColumn":
"""
Build a CategoricalColumn
Parameters
----------
categories : Column
Column of categories
codes : Column
Column of codes, the size of the resulting Column will be
the size of `codes`
mask : Buffer
Null mask
size : int, optional
offset : int, optional
ordered : bool, default False
Indicates whether the categories are ordered
"""
codes_dtype = min_unsigned_type(len(categories))
codes = as_column(codes)
if codes.dtype != codes_dtype:
codes = codes.astype(codes_dtype)
dtype = CategoricalDtype(categories=categories, ordered=ordered)
result = build_column(
data=None,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=(codes,),
)
return cast("cudf.core.column.CategoricalColumn", result)
def build_interval_column(
left_col,
right_col,
mask=None,
size=None,
offset=0,
null_count=None,
closed="right",
):
"""
Build an IntervalColumn
Parameters
----------
left_col : Column
Column of values representing the left of the interval
right_col : Column
        Column of values representing the right of the interval
mask : Buffer
Null mask
size : int, optional
offset : int, optional
closed : {"left", "right", "both", "neither"}, default "right"
Whether the intervals are closed on the left-side, right-side,
both or neither.
"""
left = as_column(left_col)
right = as_column(right_col)
if closed not in {"left", "right", "both", "neither"}:
closed = "right"
if type(left_col) is not list:
dtype = IntervalDtype(left_col.dtype, closed)
else:
dtype = IntervalDtype("int64", closed)
size = len(left)
return build_column(
data=None,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=(left, right),
)
def build_list_column(
indices: ColumnBase,
elements: ColumnBase,
mask: Optional[Buffer] = None,
size: Optional[int] = None,
offset: int = 0,
null_count: Optional[int] = None,
) -> "cudf.core.column.ListColumn":
"""
Build a ListColumn
Parameters
----------
indices : ColumnBase
Column of list indices
elements : ColumnBase
Column of list elements
mask: Buffer
Null mask
size: int, optional
offset: int, optional
"""
dtype = ListDtype(element_type=elements.dtype)
if size is None:
if indices.size == 0:
size = 0
else:
            # one less because the offsets child has ``size + 1`` entries; its
            # last entry is the end offset into the elements column
size = indices.size - 1
size = size - offset
result = build_column(
data=None,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=(indices, elements),
)
return cast("cudf.core.column.ListColumn", result)
def build_struct_column(
names: Sequence[str],
children: Tuple[ColumnBase, ...],
dtype: Optional[Dtype] = None,
mask: Optional[Buffer] = None,
size: Optional[int] = None,
offset: int = 0,
null_count: Optional[int] = None,
) -> "cudf.core.column.StructColumn":
"""
Build a StructColumn
Parameters
----------
names : sequence of strings
Field names to map to children dtypes, must be strings.
children : tuple
mask: Buffer
Null mask
size: int, optional
offset: int, optional
"""
if dtype is None:
dtype = StructDtype(
fields={name: col.dtype for name, col in zip(names, children)}
)
result = build_column(
data=None,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
return cast("cudf.core.column.StructColumn", result)
def _make_copy_replacing_NaT_with_null(column):
"""Return a copy with NaT values replaced with nulls."""
if np.issubdtype(column.dtype, np.timedelta64):
na_value = np.timedelta64("NaT", column.time_unit)
elif np.issubdtype(column.dtype, np.datetime64):
na_value = np.datetime64("NaT", column.time_unit)
else:
raise ValueError("This type does not support replacing NaT with null.")
null = column_empty_like(column, masked=True, newsize=1)
out_col = cudf._lib.replace.replace(
column,
build_column(
as_buffer(np.array([na_value], dtype=column.dtype).view("|u1")),
dtype=column.dtype,
),
null,
)
return out_col
def as_column(
arbitrary: Any,
nan_as_null: Optional[bool] = None,
dtype: Optional[Dtype] = None,
length: Optional[int] = None,
):
"""Create a Column from an arbitrary object
Parameters
----------
arbitrary : object
Object to construct the Column from. See *Notes*.
nan_as_null : bool, optional, default None
If None (default), treats NaN values in arbitrary as null if there is
no mask passed along with it. If True, combines the mask and NaNs to
form a new validity mask. If False, leaves NaN values as is.
dtype : optional
Optionally typecast the constructed Column to the given
dtype.
length : int, optional
If `arbitrary` is a scalar, broadcast into a Column of
the given length.
Returns
-------
A Column of the appropriate type and size.
Notes
-----
    Currently supported inputs are:
* ``Column``
* ``Series``
* ``Index``
* Scalars (can be broadcasted to a specified `length`)
* Objects exposing ``__cuda_array_interface__`` (e.g., numba device arrays)
    * Objects exposing ``__array_interface__`` (e.g., numpy arrays)
* pyarrow array
* pandas.Categorical objects
"""
if isinstance(arbitrary, ColumnBase):
if dtype is not None:
return arbitrary.astype(dtype)
else:
return arbitrary
elif isinstance(arbitrary, cudf.Series):
data = arbitrary._column
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, cudf.BaseIndex):
data = arbitrary._values
if dtype is not None:
data = data.astype(dtype)
elif hasattr(arbitrary, "__cuda_array_interface__"):
desc = arbitrary.__cuda_array_interface__
shape = desc["shape"]
if len(shape) > 1:
raise ValueError("Data must be 1-dimensional")
current_dtype = np.dtype(desc["typestr"])
arb_dtype = (
np.dtype("float32")
if current_dtype == "float16"
else cudf.dtype(current_dtype)
)
if desc.get("mask", None) is not None:
# Extract and remove the mask from arbitrary before
# passing to cupy.asarray
mask = _mask_from_cuda_array_interface_desc(arbitrary)
arbitrary = SimpleNamespace(__cuda_array_interface__=desc.copy())
arbitrary.__cuda_array_interface__["mask"] = None
desc = arbitrary.__cuda_array_interface__
else:
mask = None
arbitrary = cupy.asarray(arbitrary)
if arb_dtype != current_dtype:
arbitrary = arbitrary.astype(arb_dtype)
current_dtype = arb_dtype
if (
desc["strides"] is not None
and not (arbitrary.itemsize,) == arbitrary.strides
):
arbitrary = cupy.ascontiguousarray(arbitrary)
data = as_buffer(arbitrary, exposed=cudf.get_option("copy_on_write"))
col = build_column(data, dtype=current_dtype, mask=mask)
if dtype is not None:
col = col.astype(dtype)
if isinstance(col, cudf.core.column.CategoricalColumn):
return col
elif np.issubdtype(col.dtype, np.floating):
if nan_as_null or (mask is None and nan_as_null is None):
mask = libcudf.transform.nans_to_nulls(col.fillna(np.nan))
col = col.set_mask(mask)
elif np.issubdtype(col.dtype, np.datetime64):
if nan_as_null or (mask is None and nan_as_null is None):
col = _make_copy_replacing_NaT_with_null(col)
return col
elif isinstance(arbitrary, (pa.Array, pa.ChunkedArray)):
if isinstance(arbitrary, pa.lib.HalfFloatArray):
raise NotImplementedError(
"Type casting from `float16` to `float32` is not "
"yet supported in pyarrow, see: "
"https://issues.apache.org/jira/browse/ARROW-3802"
)
col = ColumnBase.from_arrow(arbitrary)
if isinstance(arbitrary, pa.NullArray):
if dtype is not None:
# Cast the column to the `dtype` if specified.
new_dtype = dtype
elif len(arbitrary) == 0:
# If the column is empty, it has to be
# a `float64` dtype.
new_dtype = cudf.dtype("float64")
else:
# If the null column is not empty, it has to
# be of `object` dtype.
new_dtype = cudf.dtype(arbitrary.type.to_pandas_dtype())
if cudf.get_option(
"mode.pandas_compatible"
) and new_dtype == cudf.dtype("O"):
# We internally raise if we do `astype("object")`, hence
# need to cast to `str` since this is safe to do so because
# it is a null-array.
new_dtype = "str"
col = col.astype(new_dtype)
return col
elif isinstance(arbitrary, (pd.Series, pd.Categorical)):
if isinstance(arbitrary, pd.Series):
if isinstance(
arbitrary.array, pd.core.arrays.masked.BaseMaskedArray
):
return as_column(arbitrary.array)
elif PANDAS_GE_150 and isinstance(arbitrary.dtype, pd.ArrowDtype):
if cudf.get_option("mode.pandas_compatible"):
raise NotImplementedError("not supported")
return as_column(pa.array(arbitrary.array, from_pandas=True))
elif isinstance(arbitrary.dtype, pd.SparseDtype):
raise NotImplementedError(
f"{arbitrary.dtype} is not supported. Convert first to "
f"{arbitrary.dtype.subtype}."
)
if is_categorical_dtype(arbitrary.dtype):
if isinstance(
arbitrary.dtype.categories.dtype, pd.DatetimeTZDtype
):
raise NotImplementedError(
"cuDF does not yet support timezone-aware datetimes"
)
data = as_column(pa.array(arbitrary, from_pandas=True))
elif is_interval_dtype(arbitrary.dtype):
if isinstance(arbitrary.dtype.subtype, pd.DatetimeTZDtype):
raise NotImplementedError(
"cuDF does not yet support timezone-aware datetimes"
)
data = as_column(pa.array(arbitrary, from_pandas=True))
elif arbitrary.dtype == np.bool_:
data = as_column(cupy.asarray(arbitrary), dtype=arbitrary.dtype)
elif arbitrary.dtype.kind in ("f"):
arb_dtype = np.dtype(arbitrary.dtype)
data = as_column(
cupy.asarray(arbitrary, dtype=arb_dtype),
nan_as_null=nan_as_null,
dtype=dtype,
)
elif arbitrary.dtype.kind in ("u", "i"):
data = as_column(
cupy.asarray(arbitrary), nan_as_null=nan_as_null, dtype=dtype
)
elif isinstance(arbitrary.dtype, pd.PeriodDtype):
raise NotImplementedError(
"cuDF does not yet support `PeriodDtype`"
)
else:
if cudf.get_option(
"mode.pandas_compatible"
) and _is_pandas_nullable_extension_dtype(arbitrary.dtype):
raise NotImplementedError("not supported")
pyarrow_array = pa.array(arbitrary, from_pandas=nan_as_null)
if arbitrary.dtype == cudf.dtype("object") and cudf.dtype(
pyarrow_array.type.to_pandas_dtype()
) != cudf.dtype(arbitrary.dtype):
raise MixedTypeError("Cannot create column with mixed types")
if isinstance(pyarrow_array.type, pa.Decimal128Type):
pyarrow_type = cudf.Decimal128Dtype.from_arrow(
pyarrow_array.type
)
else:
pyarrow_type = arbitrary.dtype
data = as_column(pyarrow_array, dtype=pyarrow_type)
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, (pd.Timestamp, pd.Timedelta)):
        # This will always treat NaT as null since, unlike NaN, it is not
        # technically a discrete value
length = length or 1
data = as_column(
pa.array(pd.Series([arbitrary] * length), from_pandas=True)
)
if dtype is not None:
data = data.astype(dtype)
elif np.isscalar(arbitrary) and not isinstance(arbitrary, memoryview):
length = length or 1
if (
(nan_as_null is True)
and isinstance(arbitrary, (np.floating, float))
and np.isnan(arbitrary)
):
arbitrary = None
if dtype is None:
dtype = cudf.dtype("float64")
data = as_column(full(length, arbitrary, dtype=dtype))
if not nan_as_null and not is_decimal_dtype(data.dtype):
if np.issubdtype(data.dtype, np.floating):
data = data.fillna(np.nan)
elif np.issubdtype(data.dtype, np.datetime64):
data = data.fillna(np.datetime64("NaT"))
elif hasattr(arbitrary, "__array_interface__"):
# CUDF assumes values are always contiguous
desc = arbitrary.__array_interface__
shape = desc["shape"]
arb_dtype = np.dtype(desc["typestr"])
if len(shape) > 1:
raise ValueError("Data must be 1-dimensional")
arbitrary = np.asarray(arbitrary)
# Handle case that `arbitrary` elements are cupy arrays
if (
shape
and shape[0]
and hasattr(arbitrary[0], "__cuda_array_interface__")
):
return as_column(
cupy.asarray(arbitrary, dtype=arbitrary[0].dtype),
nan_as_null=nan_as_null,
dtype=dtype,
length=length,
)
if not arbitrary.flags["C_CONTIGUOUS"]:
arbitrary = np.ascontiguousarray(arbitrary)
delayed_cast = False
if dtype is not None:
try:
dtype = np.dtype(dtype)
except TypeError:
# Some `dtype`'s can't be parsed by `np.dtype`
# for which we will have to cast after the column
# has been constructed.
delayed_cast = True
else:
arbitrary = arbitrary.astype(dtype)
if arb_dtype.kind == "M":
time_unit = get_time_unit(arbitrary)
cast_dtype = time_unit in ("D", "W", "M", "Y")
if cast_dtype:
arbitrary = arbitrary.astype(cudf.dtype("datetime64[s]"))
buffer = as_buffer(arbitrary.view("|u1"))
mask = None
if nan_as_null is None or nan_as_null is True:
data = build_column(buffer, dtype=arbitrary.dtype)
data = _make_copy_replacing_NaT_with_null(data)
mask = data.mask
data = build_column(data=buffer, mask=mask, dtype=arbitrary.dtype)
elif arb_dtype.kind == "m":
time_unit = get_time_unit(arbitrary)
cast_dtype = time_unit in ("D", "W", "M", "Y")
if cast_dtype:
arbitrary = arbitrary.astype(cudf.dtype("timedelta64[s]"))
buffer = as_buffer(arbitrary.view("|u1"))
mask = None
if nan_as_null is None or nan_as_null is True:
data = build_column(buffer, dtype=arbitrary.dtype)
data = _make_copy_replacing_NaT_with_null(data)
mask = data.mask
data = cudf.core.column.timedelta.TimeDeltaColumn(
data=buffer,
size=len(arbitrary),
mask=mask,
dtype=arbitrary.dtype,
)
elif (
arbitrary.size != 0
and arb_dtype.kind in ("O")
and isinstance(arbitrary[0], pd._libs.interval.Interval)
):
            # changing from pandas array to Series; possibly an Arrow bug
interval_series = pd.Series(arbitrary)
data = as_column(
pa.Array.from_pandas(interval_series),
dtype=arbitrary.dtype,
)
if dtype is not None:
data = data.astype(dtype)
elif arb_dtype.kind in ("O", "U"):
data = as_column(pa.array(arbitrary), dtype=arbitrary.dtype)
            # There is no cast operation available for pa.Array from int to
            # str; hence, instead of handling this in the pa.Array block, we
            # have to type-cast here.
if dtype is not None:
data = data.astype(dtype)
elif arb_dtype.kind in ("f"):
if arb_dtype == np.dtype("float16"):
arb_dtype = np.dtype("float32")
arb_dtype = cudf.dtype(arb_dtype if dtype is None else dtype)
data = as_column(
cupy.asarray(arbitrary, dtype=arb_dtype),
nan_as_null=nan_as_null,
)
else:
data = as_column(cupy.asarray(arbitrary), nan_as_null=nan_as_null)
if delayed_cast:
data = data.astype(cudf.dtype(dtype))
elif isinstance(arbitrary, pd.arrays.PandasArray):
if cudf.get_option(
"mode.pandas_compatible"
) and _is_pandas_nullable_extension_dtype(arbitrary.dtype):
raise NotImplementedError("not supported")
if is_categorical_dtype(arbitrary.dtype):
arb_dtype = arbitrary.dtype
else:
if arbitrary.dtype == pd.StringDtype():
arb_dtype = cudf.dtype("O")
else:
arb_dtype = (
cudf.dtype("float32")
if arbitrary.dtype == "float16"
else cudf.dtype(arbitrary.dtype)
)
if arb_dtype != arbitrary.dtype.numpy_dtype:
arbitrary = arbitrary.astype(arb_dtype)
if (
arbitrary.size != 0
and isinstance(arbitrary[0], pd._libs.interval.Interval)
and arb_dtype.kind in ("O")
):
            # changing from pandas array to Series; possibly an Arrow bug
interval_series = pd.Series(arbitrary)
data = as_column(
pa.Array.from_pandas(interval_series), dtype=arb_dtype
)
elif arb_dtype.kind in ("O", "U"):
pyarrow_array = pa.Array.from_pandas(arbitrary)
if not isinstance(
pyarrow_array,
(
pa.ListArray,
pa.StructArray,
pa.NullArray,
pa.Decimal128Array,
pa.StringArray,
pa.BooleanArray,
),
):
raise MixedTypeError("Cannot create column with mixed types")
data = as_column(pyarrow_array, dtype=arb_dtype)
else:
data = as_column(
pa.array(
arbitrary,
from_pandas=True if nan_as_null is None else nan_as_null,
),
nan_as_null=nan_as_null,
)
if dtype is not None:
data = data.astype(dtype)
elif isinstance(arbitrary, pd.arrays.SparseArray):
raise NotImplementedError(
f"{arbitrary.dtype} is not supported. Convert first to "
f"{arbitrary.dtype.subtype}."
)
elif isinstance(arbitrary, memoryview):
data = as_column(
np.asarray(arbitrary), dtype=dtype, nan_as_null=nan_as_null
)
elif isinstance(arbitrary, cudf.Scalar):
data = ColumnBase.from_scalar(arbitrary, length if length else 1)
elif isinstance(arbitrary, pd.core.arrays.masked.BaseMaskedArray):
if cudf.get_option("mode.pandas_compatible"):
raise NotImplementedError("not supported")
data = as_column(pa.Array.from_pandas(arbitrary), dtype=dtype)
elif (
(
isinstance(arbitrary, pd.DatetimeIndex)
and isinstance(arbitrary.dtype, pd.DatetimeTZDtype)
)
or (
isinstance(arbitrary, pd.IntervalIndex)
and is_datetime64tz_dtype(arbitrary.dtype.subtype)
)
or (
isinstance(arbitrary, pd.CategoricalIndex)
and isinstance(
arbitrary.dtype.categories.dtype, pd.DatetimeTZDtype
)
)
):
raise NotImplementedError(
"cuDF does not yet support timezone-aware datetimes"
)
elif isinstance(
arbitrary, (pd.core.arrays.period.PeriodArray, pd.PeriodIndex)
):
raise NotImplementedError(
f"cuDF does not yet support {type(arbitrary).__name__}"
)
elif (
cudf.get_option("mode.pandas_compatible")
and isinstance(arbitrary, (pd.DatetimeIndex, pd.TimedeltaIndex))
and arbitrary.freq is not None
):
raise NotImplementedError("freq is not implemented yet")
else:
try:
data = as_column(
memoryview(arbitrary), dtype=dtype, nan_as_null=nan_as_null
)
except TypeError:
if dtype is not None:
# Arrow throws a type error if the input is of
# mixed-precision and cannot fit into the provided
# decimal type properly, see:
# https://github.com/apache/arrow/pull/9948
# Hence we should let the exception propagate to
# the user.
if isinstance(dtype, cudf.core.dtypes.Decimal128Dtype):
data = pa.array(
arbitrary,
type=pa.decimal128(
precision=dtype.precision, scale=dtype.scale
),
)
return cudf.core.column.Decimal128Column.from_arrow(data)
elif isinstance(dtype, cudf.core.dtypes.Decimal64Dtype):
data = pa.array(
arbitrary,
type=pa.decimal128(
precision=dtype.precision, scale=dtype.scale
),
)
return cudf.core.column.Decimal64Column.from_arrow(data)
elif isinstance(dtype, cudf.core.dtypes.Decimal32Dtype):
data = pa.array(
arbitrary,
type=pa.decimal128(
precision=dtype.precision, scale=dtype.scale
),
)
return cudf.core.column.Decimal32Column.from_arrow(data)
pa_type = None
np_type = None
try:
if dtype is not None:
if is_categorical_dtype(dtype) or is_interval_dtype(dtype):
raise TypeError
if is_datetime64tz_dtype(dtype):
raise NotImplementedError(
"Use `tz_localize()` to construct "
"timezone aware data."
)
elif is_datetime64_dtype(dtype):
# Error checking only, actual construction happens
# below.
pa_array = pa.array(arbitrary)
if (
isinstance(pa_array.type, pa.TimestampType)
and pa_array.type.tz is not None
):
raise NotImplementedError(
"cuDF does not yet support timezone-aware "
"datetimes"
)
if is_list_dtype(dtype):
data = pa.array(arbitrary)
if type(data) not in (pa.ListArray, pa.NullArray):
raise ValueError(
"Cannot create list column from given data"
)
return as_column(data, nan_as_null=nan_as_null)
elif isinstance(
dtype, cudf.StructDtype
) and not isinstance(dtype, cudf.IntervalDtype):
data = pa.array(arbitrary, type=dtype.to_arrow())
return as_column(data, nan_as_null=nan_as_null)
elif isinstance(dtype, cudf.core.dtypes.Decimal128Dtype):
data = pa.array(
arbitrary,
type=pa.decimal128(
precision=dtype.precision, scale=dtype.scale
),
)
return cudf.core.column.Decimal128Column.from_arrow(
data
)
elif isinstance(dtype, cudf.core.dtypes.Decimal64Dtype):
data = pa.array(
arbitrary,
type=pa.decimal128(
precision=dtype.precision, scale=dtype.scale
),
)
return cudf.core.column.Decimal64Column.from_arrow(
data
)
elif isinstance(dtype, cudf.core.dtypes.Decimal32Dtype):
data = pa.array(
arbitrary,
type=pa.decimal128(
precision=dtype.precision, scale=dtype.scale
),
)
return cudf.core.column.Decimal32Column.from_arrow(
data
)
if is_bool_dtype(dtype):
# Need this special case handling for bool dtypes,
# since 'boolean' & 'pd.BooleanDtype' are not
# understood by np.dtype below.
dtype = "bool"
np_dtype = np.dtype(dtype)
if np_dtype.kind in {"m", "M"}:
unit = np.datetime_data(np_dtype)[0]
if unit not in {"ns", "us", "ms", "s", "D"}:
raise NotImplementedError(
f"{dtype=} is not supported."
)
np_type = np_dtype.type
pa_type = np_to_pa_dtype(np_dtype)
else:
# By default cudf constructs a 64-bit column. Setting
# the `default_*_bitwidth` to 32 will result in a 32-bit
# column being created.
if (
cudf.get_option("default_integer_bitwidth")
and infer_dtype(arbitrary) == "integer"
):
pa_type = np_to_pa_dtype(
_maybe_convert_to_default_type("int")
)
if cudf.get_option(
"default_float_bitwidth"
) and infer_dtype(arbitrary) in (
"floating",
"mixed-integer-float",
):
pa_type = np_to_pa_dtype(
_maybe_convert_to_default_type("float")
)
if (
cudf.get_option("mode.pandas_compatible")
and isinstance(
arbitrary, (pd.Index, pd.api.extensions.ExtensionArray)
)
and _is_pandas_nullable_extension_dtype(arbitrary.dtype)
):
raise NotImplementedError("not supported")
pyarrow_array = pa.array(
arbitrary,
type=pa_type,
from_pandas=True if nan_as_null is None else nan_as_null,
)
if (
isinstance(pyarrow_array, pa.NullArray)
and pa_type is None
and dtype is None
and getattr(arbitrary, "dtype", None)
== cudf.dtype("object")
):
# pa.array constructor returns a NullArray
# for empty arrays, instead of a StringArray.
# This issue is only specific to this dtype,
# all other dtypes, result in their corresponding
# arrow array creation.
dtype = cudf.dtype("str")
pyarrow_array = pyarrow_array.cast(np_to_pa_dtype(dtype))
if (
isinstance(arbitrary, pd.Index)
and arbitrary.dtype == cudf.dtype("object")
and (
cudf.dtype(pyarrow_array.type.to_pandas_dtype())
!= cudf.dtype(arbitrary.dtype)
)
):
raise MixedTypeError(
"Cannot create column with mixed types"
)
if (
cudf.get_option("mode.pandas_compatible")
and pa.types.is_integer(pyarrow_array.type)
and pyarrow_array.null_count
):
pyarrow_array = pyarrow_array.cast("float64").fill_null(
np.nan
)
data = as_column(
pyarrow_array,
dtype=dtype,
nan_as_null=nan_as_null,
)
except (pa.ArrowInvalid, pa.ArrowTypeError, TypeError) as e:
if isinstance(e, MixedTypeError):
raise TypeError(str(e))
if is_categorical_dtype(dtype):
sr = pd.Series(arbitrary, dtype="category")
data = as_column(sr, nan_as_null=nan_as_null, dtype=dtype)
elif np_type == np.str_:
sr = pd.Series(arbitrary, dtype="str")
data = as_column(sr, nan_as_null=nan_as_null)
elif is_interval_dtype(dtype):
sr = pd.Series(arbitrary, dtype="interval")
data = as_column(sr, nan_as_null=nan_as_null, dtype=dtype)
elif (
isinstance(arbitrary, Sequence)
and len(arbitrary) > 0
and any(
cudf.utils.dtypes.is_column_like(arb)
for arb in arbitrary
)
):
return cudf.core.column.ListColumn.from_sequences(
arbitrary
)
elif isinstance(arbitrary, abc.Iterable) or isinstance(
arbitrary, abc.Sequence
):
data = as_column(
_construct_array(arbitrary, dtype),
dtype=dtype,
nan_as_null=nan_as_null,
)
else:
raise e
return data
def _construct_array(
arbitrary: Any, dtype: Optional[Dtype]
) -> Union[np.ndarray, cupy.ndarray, pd.api.extensions.ExtensionArray]:
"""
Construct a CuPy/NumPy/Pandas array from `arbitrary`
"""
try:
dtype = dtype if dtype is None else cudf.dtype(dtype)
arbitrary = cupy.asarray(arbitrary, dtype=dtype)
except (TypeError, ValueError):
native_dtype = dtype
inferred_dtype = infer_dtype(arbitrary, skipna=False)
if (
dtype is None
and not cudf._lib.scalar._is_null_host_scalar(arbitrary)
and inferred_dtype
in (
"mixed",
"mixed-integer",
)
):
native_dtype = "object"
if inferred_dtype == "interval":
# Only way to construct an Interval column.
return pd.array(arbitrary)
elif (
inferred_dtype == "string" and getattr(dtype, "kind", None) == "M"
):
# We may have date-like strings with timezones
try:
pd_arbitrary = pd.to_datetime(arbitrary)
if isinstance(pd_arbitrary.dtype, pd.DatetimeTZDtype):
raise NotImplementedError(
"cuDF does not yet support timezone-aware datetimes"
)
return pd_arbitrary.to_numpy()
except pd.errors.OutOfBoundsDatetime:
# https://github.com/pandas-dev/pandas/issues/55096
pass
arbitrary = np.asarray(
arbitrary,
dtype=native_dtype
if native_dtype is None
else np.dtype(native_dtype),
)
return arbitrary
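# Hedged usage sketch for _construct_array above (added note, not a behavior
# guarantee): plain numeric sequences go through cupy.asarray, while inputs
# that CuPy rejects fall back to pandas/NumPy on the host, e.g.
#
#   _construct_array([1, 2, 3], None)            # -> cupy.ndarray
#   _construct_array(["a", None, "b"], None)     # -> numpy object ndarray
#   _construct_array([pd.Interval(0, 1)], None)  # -> pandas IntervalArray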
def _mask_from_cuda_array_interface_desc(obj) -> Union[Buffer, None]:
desc = obj.__cuda_array_interface__
mask = desc.get("mask", None)
if mask is not None:
desc = mask.__cuda_array_interface__
ptr = desc["data"][0]
nelem = desc["shape"][0]
typestr = desc["typestr"]
typecode = typestr[1]
if typecode == "t":
mask_size = bitmask_allocation_size_bytes(nelem)
mask = as_buffer(data=ptr, size=mask_size, owner=obj)
elif typecode == "b":
col = as_column(mask)
mask = bools_to_mask(col)
else:
raise NotImplementedError(
f"Cannot infer mask from typestr {typestr}"
)
return mask
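# Added note on the mask descriptor handled above (a sketch of the protocol as
# used here, not the full __cuda_array_interface__ specification): the "mask"
# entry, when present, is itself an object exposing __cuda_array_interface__.
# A typestr whose type code is "t" (a packed bitmask) is wrapped directly as a
# Buffer sized with bitmask_allocation_size_bytes, while a type code of "b"
# (one boolean per element) is first converted with bools_to_mask.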
def serialize_columns(columns) -> Tuple[List[dict], List]:
"""
Return the headers and frames resulting
    from serializing a list of Columns.
Parameters
----------
columns : list
list of Columns to serialize
Returns
-------
headers : list
list of header metadata for each Column
frames : list
list of frames
"""
headers: List[Dict[Any, Any]] = []
frames = []
if len(columns) > 0:
header_columns = [c.serialize() for c in columns]
headers, column_frames = zip(*header_columns)
for f in column_frames:
frames.extend(f)
return headers, frames
def deserialize_columns(headers: List[dict], frames: List) -> List[ColumnBase]:
"""
Construct a list of Columns from a list of headers
and frames.
"""
columns = []
for meta in headers:
col_frame_count = meta["frame_count"]
col_typ = pickle.loads(meta["type-serialized"])
colobj = col_typ.deserialize(meta, frames[:col_frame_count])
columns.append(colobj)
# Advance frames
frames = frames[col_frame_count:]
return columns
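# Hedged round-trip sketch (added; assumes the inputs are columns built with
# as_column from this module):
#
#   cols = [as_column([1, 2, 3]), as_column(["a", "b", "c"])]
#   headers, frames = serialize_columns(cols)
#   same_cols = deserialize_columns(headers, frames)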
def arange(
start: Union[int, float],
stop: Optional[Union[int, float]] = None,
step: Union[int, float] = 1,
dtype=None,
) -> cudf.core.column.NumericalColumn:
"""
Returns a column with evenly spaced values within a given interval.
Values are generated within the half-open interval [start, stop).
The first three arguments are mapped like the range built-in function,
i.e. start and step are optional.
Parameters
----------
start : int/float
Start of the interval.
stop : int/float, default is None
Stop of the interval.
step : int/float, default 1
Step width between each pair of consecutive values.
dtype : default None
Data type specifier. It is inferred from other arguments by default.
Returns
-------
cudf.core.column.NumericalColumn
Examples
--------
>>> import cudf
>>> col = cudf.core.column.arange(2, 7, 1, dtype='int16')
>>> col
<cudf.core.column.numerical.NumericalColumn object at 0x7ff7998f8b90>
>>> cudf.Series(col)
0 2
1 3
2 4
3 5
4 6
dtype: int16
"""
if stop is None:
stop = start
start = 0
if step is None:
step = 1
size = len(range(int(start), int(stop), int(step)))
if size == 0:
return as_column([], dtype=dtype)
return libcudf.filling.sequence(
size,
as_device_scalar(start, dtype=dtype),
as_device_scalar(step, dtype=dtype),
)
def full(
size: int, fill_value: ScalarLike, dtype: Optional[Dtype] = None
) -> ColumnBase:
"""
Returns a column of given size and dtype, filled with a given value.
Parameters
----------
size : int
size of the expected column.
fill_value : scalar
A scalar value to fill a new array.
dtype : default None
Data type specifier. It is inferred from other arguments by default.
Returns
-------
Column
Examples
--------
>>> import cudf
>>> col = cudf.core.column.full(size=5, fill_value=7, dtype='int8')
>>> col
<cudf.core.column.numerical.NumericalColumn object at 0x7fa0912e8b90>
>>> cudf.Series(col)
0 7
1 7
2 7
3 7
4 7
dtype: int8
"""
return ColumnBase.from_scalar(cudf.Scalar(fill_value, dtype), size)
def concat_columns(objs: "MutableSequence[ColumnBase]") -> ColumnBase:
"""Concatenate a sequence of columns."""
if len(objs) == 0:
dtype = cudf.dtype(None)
return column_empty(0, dtype=dtype, masked=True)
# If all columns are `NumericalColumn` with different dtypes,
# we cast them to a common dtype.
    # Note that pure null columns can always be cast.
not_null_col_dtypes = [o.dtype for o in objs if o.valid_count]
if len(not_null_col_dtypes) and all(
_is_non_decimal_numeric_dtype(dtyp)
and np.issubdtype(dtyp, np.datetime64)
for dtyp in not_null_col_dtypes
):
# Use NumPy to find a common dtype
common_dtype = np.find_common_type(not_null_col_dtypes, [])
# Cast all columns to the common dtype
objs = [obj.astype(common_dtype) for obj in objs]
# Find the first non-null column:
head = next((obj for obj in objs if obj.valid_count), objs[0])
for i, obj in enumerate(objs):
# Check that all columns are the same type:
if not is_dtype_equal(obj.dtype, head.dtype):
# if all null, cast to appropriate dtype
if obj.valid_count == 0:
objs[i] = column_empty_like(
head, dtype=head.dtype, masked=True, newsize=len(obj)
)
else:
raise ValueError("All columns must be the same type")
# TODO: This logic should be generalized to a dispatch to
# ColumnBase._concat so that all subclasses can override necessary
# behavior. However, at the moment it's not clear what that API should look
# like, so CategoricalColumn simply implements a minimal working API.
if all(is_categorical_dtype(o.dtype) for o in objs):
return cudf.core.column.categorical.CategoricalColumn._concat(
cast(
MutableSequence[
cudf.core.column.categorical.CategoricalColumn
],
objs,
)
)
newsize = sum(map(len, objs))
if newsize > libcudf.MAX_COLUMN_SIZE:
raise MemoryError(
f"Result of concat cannot have "
f"size > {libcudf.MAX_COLUMN_SIZE_STR}"
)
elif newsize == 0:
return column_empty(0, head.dtype, masked=True)
# Filter out inputs that have 0 length, then concatenate.
return libcudf.concat.concat_columns([o for o in objs if len(o)])
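# Hedged usage sketch for concat_columns (added; assumes plain numerical
# columns of the same dtype): matching dtypes are concatenated directly,
# all-null columns are recast to the dtype of the first non-null column, and
# genuinely mixed dtypes raise ValueError.
#
#   out = concat_columns([as_column([1, 2]), as_column([3, 4])])
#   # len(out) == 4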
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/struct.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from __future__ import annotations
from functools import cached_property
from typing import Optional
import pandas as pd
import pyarrow as pa
import cudf
from cudf._typing import Dtype
from cudf.api.types import is_struct_dtype
from cudf.core.column import ColumnBase, build_struct_column
from cudf.core.column.methods import ColumnMethods
from cudf.core.dtypes import StructDtype
from cudf.core.missing import NA
class StructColumn(ColumnBase):
"""
Column that stores fields of values.
Every column has n children, where n is
the number of fields in the Struct Dtype.
"""
dtype: StructDtype
@property
def base_size(self):
if self.base_children:
return len(self.base_children[0])
else:
return self.size + self.offset
def to_arrow(self):
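        # Added note: children that are entirely null are replaced with
        # pa.nulls of the same length so Arrow does not need their data
        # buffers; the only struct-level buffer passed below is the parent
        # validity mask (or None when the column has no nulls).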
children = [
pa.nulls(len(child))
if len(child) == child.null_count
else child.to_arrow()
for child in self.children
]
pa_type = pa.struct(
{
field: child.type
for field, child in zip(self.dtype.fields, children)
}
)
if self.nullable:
buffers = (pa.py_buffer(self.mask.memoryview()),)
else:
buffers = (None,)
return pa.StructArray.from_buffers(
pa_type, len(self), buffers, children=children
)
def to_pandas(
self, index: Optional[pd.Index] = None, **kwargs
) -> pd.Series:
# We cannot go via Arrow's `to_pandas` because of the following issue:
# https://issues.apache.org/jira/browse/ARROW-12680
pd_series = pd.Series(self.to_arrow().tolist(), dtype="object")
if index is not None:
pd_series.index = index
return pd_series
@cached_property
def memory_usage(self):
n = 0
if self.nullable:
n += cudf._lib.null_mask.bitmask_allocation_size_bytes(self.size)
for child in self.children:
n += child.memory_usage
return n
def element_indexing(self, index: int):
result = super().element_indexing(index)
return {
field: value
for field, value in zip(self.dtype.fields, result.values())
}
def __setitem__(self, key, value):
if isinstance(value, dict):
# filling in fields not in dict
for field in self.dtype.fields:
value[field] = value.get(field, NA)
value = cudf.Scalar(value, self.dtype)
super().__setitem__(key, value)
def copy(self, deep=True):
# Since struct columns are immutable, both deep and
# shallow copies share the underlying device data and mask.
result = super().copy(deep=False)
if deep:
result = result._rename_fields(self.dtype.fields.keys())
return result
def _rename_fields(self, names):
"""
Return a StructColumn with the same field values as this StructColumn,
but with the field names equal to `names`.
"""
dtype = cudf.core.dtypes.StructDtype(
{name: col.dtype for name, col in zip(names, self.children)}
)
return StructColumn(
data=None,
size=self.size,
dtype=dtype,
mask=self.base_mask,
offset=self.offset,
null_count=self.null_count,
children=self.base_children,
)
@property
def __cuda_array_interface__(self):
raise NotImplementedError(
"Structs are not yet supported via `__cuda_array_interface__`"
)
def _with_type_metadata(self: StructColumn, dtype: Dtype) -> StructColumn:
from cudf.core.column import IntervalColumn
from cudf.core.dtypes import IntervalDtype
# Check IntervalDtype first because it's a subclass of StructDtype
if isinstance(dtype, IntervalDtype):
return IntervalColumn.from_struct_column(self, closed=dtype.closed)
elif isinstance(dtype, StructDtype):
return build_struct_column(
names=dtype.fields.keys(),
children=tuple(
self.base_children[i]._with_type_metadata(dtype.fields[f])
for i, f in enumerate(dtype.fields.keys())
),
mask=self.base_mask,
size=self.size,
offset=self.offset,
null_count=self.null_count,
)
return self
class StructMethods(ColumnMethods):
"""
Struct methods for Series
"""
_column: StructColumn
def __init__(self, parent=None):
if not is_struct_dtype(parent.dtype):
raise AttributeError(
"Can only use .struct accessor with a 'struct' dtype"
)
super().__init__(parent=parent)
def field(self, key):
"""
Extract children of the specified struct column
in the Series
Parameters
----------
key: int or str
index/position or field name of the respective
struct column
Returns
-------
Series
Examples
--------
>>> s = cudf.Series([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}])
>>> s.struct.field(0)
0 1
1 3
dtype: int64
>>> s.struct.field('a')
0 1
1 3
dtype: int64
"""
fields = list(self._column.dtype.fields.keys())
if key in fields:
pos = fields.index(key)
return self._return_or_inplace(self._column.children[pos])
else:
if isinstance(key, int):
try:
return self._return_or_inplace(self._column.children[key])
except IndexError:
raise IndexError(f"Index {key} out of range")
else:
raise KeyError(
f"Field '{key}' is not found in the set of existing keys."
)
def explode(self):
"""
Return a DataFrame whose columns are the fields of this struct Series.
Notes
-----
Note that a copy of the columns is made.
Examples
--------
>>> s
0 {'a': 1, 'b': 'x'}
1 {'a': 2, 'b': 'y'}
2 {'a': 3, 'b': 'z'}
3 {'a': 4, 'b': 'a'}
dtype: struct
>>> s.struct.explode()
a b
0 1 x
1 2 y
2 3 z
3 4 a
"""
return cudf.DataFrame._from_data(
cudf.core.column_accessor.ColumnAccessor(
{
name: col.copy(deep=True)
for name, col in zip(
self._column.dtype.fields, self._column.children
)
}
)
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/lists.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from functools import cached_property
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
import pyarrow as pa
from typing_extensions import Self
import cudf
from cudf._lib.copying import segmented_gather
from cudf._lib.lists import (
concatenate_list_elements,
concatenate_rows,
contains_scalar,
count_elements,
distinct,
extract_element_column,
extract_element_scalar,
index_of_column,
index_of_scalar,
sort_lists,
)
from cudf._lib.strings.convert.convert_lists import format_list_column
from cudf._lib.types import size_type_dtype
from cudf._typing import ColumnBinaryOperand, ColumnLike, Dtype, ScalarLike
from cudf.api.types import (
_is_non_decimal_numeric_dtype,
is_list_dtype,
is_scalar,
)
from cudf.core.column import ColumnBase, as_column, column
from cudf.core.column.methods import ColumnMethods, ParentType
from cudf.core.dtypes import ListDtype
from cudf.core.missing import NA
class ListColumn(ColumnBase):
dtype: ListDtype
_VALID_BINARY_OPERATIONS = {"__add__", "__radd__"}
def __init__(
self,
size,
dtype,
mask=None,
offset=0,
null_count=None,
children=(),
):
super().__init__(
None,
size,
dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
@cached_property
def memory_usage(self):
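        # Added note: a ListColumn stores an offsets child (base_children[0])
        # and an elements child (base_children[1]).  The loop below walks any
        # nested list levels, counting each level's offsets buffer, and then
        # adds the leaf elements plus any null masks encountered on the way.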
n = 0
if self.nullable:
n += cudf._lib.null_mask.bitmask_allocation_size_bytes(self.size)
child0_size = (self.size + 1) * self.base_children[0].dtype.itemsize
current_base_child = self.base_children[1]
current_offset = self.offset
n += child0_size
while type(current_base_child) is ListColumn:
child0_size = (
current_base_child.size + 1 - current_offset
) * current_base_child.base_children[0].dtype.itemsize
current_offset = current_base_child.base_children[
0
].element_indexing(current_offset)
n += child0_size
current_base_child = current_base_child.base_children[1]
n += (
current_base_child.size - current_offset
) * current_base_child.dtype.itemsize
if current_base_child.nullable:
n += cudf._lib.null_mask.bitmask_allocation_size_bytes(
current_base_child.size
)
return n
def __setitem__(self, key, value):
if isinstance(value, list):
value = cudf.Scalar(value)
if isinstance(value, cudf.Scalar):
if value.dtype != self.dtype:
raise TypeError("list nesting level mismatch")
elif value is NA:
value = cudf.Scalar(value, dtype=self.dtype)
else:
raise ValueError(f"Can not set {value} into ListColumn")
super().__setitem__(key, value)
@property
def base_size(self):
# in some cases, libcudf will return an empty ListColumn with no
# indices; in these cases, we must manually set the base_size to 0 to
# avoid it being negative
return max(0, len(self.base_children[0]) - 1)
def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase:
# Lists only support __add__, which concatenates lists.
reflect, op = self._check_reflected_op(op)
other = self._wrap_binop_normalization(other)
if other is NotImplemented:
return NotImplemented
if isinstance(other.dtype, ListDtype):
if op == "__add__":
return concatenate_rows([self, other])
else:
raise NotImplementedError(
"Lists concatenation for this operation is not yet"
"supported"
)
else:
raise TypeError("can only concatenate list to list")
@property
def elements(self):
"""
Column containing the elements of each list (may itself be a
ListColumn)
"""
return self.children[1]
@property
def offsets(self):
"""
Integer offsets to elements specifying each row of the ListColumn
"""
return self.children[0]
def to_arrow(self):
offsets = self.offsets.to_arrow()
elements = (
pa.nulls(len(self.elements))
if len(self.elements) == self.elements.null_count
else self.elements.to_arrow()
)
pa_type = pa.list_(elements.type)
if self.nullable:
nbuf = pa.py_buffer(self.mask.memoryview())
buffers = (nbuf, offsets.buffers()[1])
else:
buffers = offsets.buffers()
return pa.ListArray.from_buffers(
pa_type, len(self), buffers, children=[elements]
)
def set_base_data(self, value):
if value is not None:
raise RuntimeError(
"ListColumn's do not use data attribute of Column, use "
"`set_base_children` instead"
)
else:
super().set_base_data(value)
def set_base_children(self, value: Tuple[ColumnBase, ...]):
super().set_base_children(value)
_, values = value
self._dtype = cudf.ListDtype(element_type=values.dtype)
@property
def __cuda_array_interface__(self):
raise NotImplementedError(
"Lists are not yet supported via `__cuda_array_interface__`"
)
def normalize_binop_value(self, other):
if not isinstance(other, ListColumn):
return NotImplemented
return other
def _with_type_metadata(
self: "cudf.core.column.ListColumn", dtype: Dtype
) -> "cudf.core.column.ListColumn":
if isinstance(dtype, ListDtype):
return column.build_list_column(
indices=self.base_children[0],
elements=self.base_children[1]._with_type_metadata(
dtype.element_type
),
mask=self.base_mask,
size=self.size,
offset=self.offset,
null_count=self.null_count,
)
return self
def copy(self, deep: bool = True):
# Since list columns are immutable, both deep and shallow copies share
# the underlying device data and mask.
return super().copy(deep=False)
def leaves(self):
if isinstance(self.elements, ListColumn):
return self.elements.leaves()
else:
return self.elements
@classmethod
def from_sequences(
cls, arbitrary: Sequence[ColumnLike]
) -> "cudf.core.column.ListColumn":
"""
        Create a list column from a list of column-like sequences.
"""
data_col = column.column_empty(0)
mask_col = []
offset_col = [0]
offset = 0
# Build Data, Mask & Offsets
for data in arbitrary:
if cudf._lib.scalar._is_null_host_scalar(data):
mask_col.append(False)
offset_col.append(offset)
else:
mask_col.append(True)
data_col = data_col.append(as_column(data))
offset += len(data)
offset_col.append(offset)
offset_col = column.as_column(offset_col, dtype=size_type_dtype)
# Build ListColumn
res = cls(
size=len(arbitrary),
dtype=cudf.ListDtype(data_col.dtype),
mask=cudf._lib.transform.bools_to_mask(as_column(mask_col)),
offset=0,
null_count=0,
children=(offset_col, data_col),
)
return res
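    # Hedged usage sketch for from_sequences (added; assumes host sequences
    # with a uniform leaf dtype):
    #
    #   col = ListColumn.from_sequences([[1, 2], None, [3]])
    #   # col.size == 3 and col.dtype == cudf.ListDtype("int64")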
def as_string_column(
self, dtype: Dtype, format=None, **kwargs
) -> "cudf.core.column.StringColumn":
"""
Create a strings column from a list column
"""
lc = self._transform_leaves(
lambda col, dtype: col.as_string_column(dtype), dtype
)
# Separator strings to match the Python format
separators = as_column([", ", "[", "]"])
# Call libcudf to format the list column
return format_list_column(lc, separators)
def _transform_leaves(self, func, *args, **kwargs) -> Self:
# return a new list column with the same nested structure
# as ``self``, but with the leaf column transformed
# by applying ``func`` to it
cc: List[ListColumn] = []
c: ColumnBase = self
while isinstance(c, ListColumn):
cc.insert(0, c)
c = c.children[1]
lc = func(c, *args, **kwargs)
# Rebuild the list column replacing just the leaf child
for c in cc:
o = c.children[0]
lc = cudf.core.column.ListColumn( # type: ignore
size=c.size,
dtype=cudf.ListDtype(lc.dtype),
mask=c.mask,
offset=c.offset,
null_count=c.null_count,
children=(o, lc),
)
return lc
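# Added note on ListColumn._transform_leaves above: only the innermost (leaf)
# column is passed to ``func``; every enclosing list level is then rebuilt
# around the transformed leaf with its original offsets, mask, offset and
# null_count, so the nesting structure is preserved.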
class ListMethods(ColumnMethods):
"""
List methods for Series
"""
_column: ListColumn
def __init__(self, parent: ParentType):
if not is_list_dtype(parent.dtype):
raise AttributeError(
"Can only use .list accessor with a 'list' dtype"
)
super().__init__(parent=parent)
def get(
self,
index: int,
default: Optional[Union[ScalarLike, ColumnLike]] = None,
) -> ParentType:
"""
Extract element at the given index from each list in a Series of lists.
``index`` can be an integer or a sequence of integers. If
``index`` is an integer, the element at position ``index`` is
extracted from each list. If ``index`` is a sequence, it must
be of the same length as the Series, and ``index[i]``
specifies the position of the element to extract from the
``i``-th list in the Series.
If the index is out of bounds for any list, return <NA> or, if
provided, ``default``. Thus, this method never raises an
``IndexError``.
Parameters
----------
index : int or sequence of ints
default : scalar, optional
Returns
-------
Series or Index
Examples
--------
>>> s = cudf.Series([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
>>> s.list.get(-1)
0 3
1 5
2 6
dtype: int64
>>> s = cudf.Series([[1, 2], [3, 4, 5], [4, 5, 6]])
>>> s.list.get(2)
0 <NA>
1 5
2 6
dtype: int64
>>> s.list.get(2, default=0)
0 0
1 5
2 6
dtype: int64
>>> s.list.get([0, 1, 2])
0 1
1 4
2 6
dtype: int64
"""
if is_scalar(index):
out = extract_element_scalar(self._column, cudf.Scalar(index))
else:
index = as_column(index)
out = extract_element_column(self._column, as_column(index))
if not (default is None or default is NA):
# determine rows for which `index` is out-of-bounds
lengths = count_elements(self._column)
out_of_bounds_mask = (np.negative(index) > lengths) | (
index >= lengths
)
# replace the value in those rows (should be NA) with `default`
if out_of_bounds_mask.any():
out = out._scatter_by_column(
out_of_bounds_mask, cudf.Scalar(default)
)
if out.dtype != self._column.dtype.element_type:
# libcudf doesn't maintain struct labels so we must transfer over
# manually from the input column if we lost some information
# somewhere. Not doing this unilaterally since the cost is
            # non-zero.
out = out._with_type_metadata(self._column.dtype.element_type)
return self._return_or_inplace(out)
def contains(self, search_key: ScalarLike) -> ParentType:
"""
Returns boolean values indicating whether the specified scalar
is an element of each row.
Parameters
----------
search_key : scalar
element being searched for in each row of the list column
Returns
-------
Series or Index
Examples
--------
>>> s = cudf.Series([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
>>> s.list.contains(4)
Series([False, True, True])
dtype: bool
"""
return self._return_or_inplace(
contains_scalar(self._column, cudf.Scalar(search_key))
)
def index(self, search_key: Union[ScalarLike, ColumnLike]) -> ParentType:
"""
Returns integers representing the index of the search key for each row.
If ``search_key`` is a sequence, it must be the same length as the
Series and ``search_key[i]`` represents the search key for the
``i``-th row of the Series.
If the search key is not contained in a row, -1 is returned. If either
the row or the search key are null, <NA> is returned. If the search key
is contained multiple times, the smallest matching index is returned.
Parameters
----------
search_key : scalar or sequence of scalars
Element or elements being searched for in each row of the list
column
Returns
-------
Series or Index
Examples
--------
>>> s = cudf.Series([[1, 2, 3], [3, 4, 5], [4, 5, 6]])
>>> s.list.index(4)
0 -1
1 1
2 0
dtype: int32
>>> s = cudf.Series([["a", "b", "c"], ["x", "y", "z"]])
>>> s.list.index(["b", "z"])
0 1
1 2
dtype: int32
>>> s = cudf.Series([[4, 5, 6], None, [-3, -2, -1]])
>>> s.list.index([None, 3, -2])
0 <NA>
1 <NA>
2 1
dtype: int32
"""
if is_scalar(search_key):
return self._return_or_inplace(
index_of_scalar(self._column, cudf.Scalar(search_key))
)
else:
return self._return_or_inplace(
index_of_column(self._column, as_column(search_key))
)
@property
def leaves(self) -> ParentType:
"""
From a Series of (possibly nested) lists, obtain the elements from
the innermost lists as a flat Series (one value per row).
Returns
-------
Series or Index
Examples
--------
>>> a = cudf.Series([[[1, None], [3, 4]], None, [[5, 6]]])
>>> a.list.leaves
0 1
1 <NA>
2 3
3 4
4 5
5 6
dtype: int64
"""
return self._return_or_inplace(
self._column.leaves(), retain_index=False
)
def len(self) -> ParentType:
"""
Computes the length of each element in the Series/Index.
Returns
-------
Series or Index
Examples
--------
>>> s = cudf.Series([[1, 2, 3], None, [4, 5]])
>>> s
0 [1, 2, 3]
1 None
2 [4, 5]
dtype: list
>>> s.list.len()
0 3
1 <NA>
2 2
dtype: int32
"""
return self._return_or_inplace(count_elements(self._column))
def take(self, lists_indices: ColumnLike) -> ParentType:
"""
Collect list elements based on given indices.
Parameters
----------
lists_indices: Series-like of lists
Specifies what to collect from each row
Returns
-------
Series or Index
Examples
--------
>>> s = cudf.Series([[1, 2, 3], None, [4, 5]])
>>> s
0 [1, 2, 3]
1 None
2 [4, 5]
dtype: list
>>> s.list.take([[0, 1], [], []])
0 [1, 2]
1 None
2 []
dtype: list
"""
lists_indices_col = as_column(lists_indices)
if not isinstance(lists_indices_col, ListColumn):
raise ValueError("lists_indices should be list type array.")
if not lists_indices_col.size == self._column.size:
raise ValueError(
"lists_indices and list column is of different " "size."
)
if not _is_non_decimal_numeric_dtype(
lists_indices_col.children[1].dtype
) or not np.issubdtype(
lists_indices_col.children[1].dtype, np.integer
):
raise TypeError(
"lists_indices should be column of values of index types."
)
return self._return_or_inplace(
segmented_gather(self._column, lists_indices_col)
)
def unique(self) -> ParentType:
"""
Returns the unique elements in each list.
The ordering of elements is not guaranteed.
Returns
-------
Series or Index
Examples
--------
>>> s = cudf.Series([[1, 1, 2, None, None], None, [4, 4], []])
>>> s
0 [1.0, 1.0, 2.0, nan, nan]
1 None
2 [4.0, 4.0]
3 []
dtype: list
>>> s.list.unique() # Order of list element is not guaranteed
0 [1.0, 2.0, nan]
1 None
2 [4.0]
3 []
dtype: list
"""
if is_list_dtype(self._column.children[1].dtype):
raise NotImplementedError("Nested lists unique is not supported.")
return self._return_or_inplace(
distinct(self._column, nulls_equal=True, nans_all_equal=True)
)
def sort_values(
self,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
) -> ParentType:
"""
Sort each list by the values.
Sort the lists in ascending or descending order by some criterion.
Parameters
----------
ascending : bool, default True
If True, sort values in ascending order, otherwise descending.
na_position : {'first', 'last'}, default 'last'
'first' puts nulls at the beginning, 'last' puts nulls at the end.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, ..., n - 1.
Returns
-------
Series or Index with each list sorted
Notes
-----
Difference from pandas:
* Not supporting: `inplace`, `kind`
Examples
--------
>>> s = cudf.Series([[4, 2, None, 9], [8, 8, 2], [2, 1]])
>>> s.list.sort_values(ascending=True, na_position="last")
0 [2.0, 4.0, 9.0, nan]
1 [2.0, 8.0, 8.0]
2 [1.0, 2.0]
dtype: list
"""
if inplace:
raise NotImplementedError("`inplace` not currently implemented.")
if kind != "quicksort":
raise NotImplementedError("`kind` not currently implemented.")
if na_position not in {"first", "last"}:
raise ValueError(f"Unknown `na_position` value {na_position}")
if is_list_dtype(self._column.children[1].dtype):
raise NotImplementedError("Nested lists sort is not supported.")
return self._return_or_inplace(
sort_lists(self._column, ascending, na_position),
retain_index=not ignore_index,
)
def concat(self, dropna=True) -> ParentType:
"""
For a column with at least one level of nesting, concatenate the
lists in each row.
Parameters
----------
dropna: bool, optional
If True (default), ignores top-level null elements in each row.
If False, and top-level null elements are present, the resulting
row in the output is null.
Returns
-------
Series or Index
Examples
--------
>>> s1
0 [[1.0, 2.0], [3.0, 4.0, 5.0]]
1 [[6.0, None], [7.0], [8.0, 9.0]]
dtype: list
>>> s1.list.concat()
0 [1.0, 2.0, 3.0, 4.0, 5.0]
1 [6.0, None, 7.0, 8.0, 9.0]
dtype: list
Null values at the top-level in each row are dropped by default:
>>> s2
0 [[1.0, 2.0], None, [3.0, 4.0, 5.0]]
1 [[6.0, None], [7.0], [8.0, 9.0]]
dtype: list
>>> s2.list.concat()
0 [1.0, 2.0, 3.0, 4.0, 5.0]
1 [6.0, None, 7.0, 8.0, 9.0]
dtype: list
Use ``dropna=False`` to produce a null instead:
>>> s2.list.concat(dropna=False)
0 None
1 [6.0, nan, 7.0, 8.0, 9.0]
dtype: list
"""
return self._return_or_inplace(
concatenate_list_elements(self._column, dropna=dropna)
)
def astype(self, dtype):
"""
Return a new list Series with the leaf values casted
to the specified data type.
Parameters
----------
dtype: data type to cast leaves values to
Returns
-------
A new Series of lists
Examples
--------
>>> s = cudf.Series([[1, 2], [3, 4]])
>>> s.dtype
ListDtype(int64)
>>> s2 = s.list.astype("float64")
>>> s2.dtype
ListDtype(float64)
"""
return self._return_or_inplace(
self._column._transform_leaves(
lambda col, dtype: col.astype(dtype), dtype
)
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/methods.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from __future__ import annotations
from typing import Optional, Union, overload
from typing_extensions import Literal
import cudf
from cudf.utils.utils import NotIterable
ParentType = Union["cudf.Series", "cudf.core.index.GenericIndex"]
class ColumnMethods(NotIterable):
_parent: ParentType
def __init__(self, parent: ParentType):
self._parent = parent
self._column = self._parent._column
@overload
def _return_or_inplace(
self,
new_col,
inplace: Literal[True],
expand: bool = False,
retain_index: bool = True,
) -> None:
...
@overload
def _return_or_inplace(
self,
new_col,
inplace: Literal[False],
expand: bool = False,
retain_index: bool = True,
) -> ParentType:
...
@overload
def _return_or_inplace(
self,
new_col,
expand: bool = False,
retain_index: bool = True,
) -> ParentType:
...
@overload
def _return_or_inplace(
self,
new_col,
inplace: bool = False,
expand: bool = False,
retain_index: bool = True,
) -> Optional[ParentType]:
...
def _return_or_inplace(
self, new_col, inplace=False, expand=False, retain_index=True
):
"""
Returns an object of the type of the column owner or updates the column
of the owner (Series or Index) to mimic an inplace operation
"""
if inplace:
self._parent._mimic_inplace(
self._parent.__class__._from_data(
{self._parent.name: new_col}
),
inplace=True,
)
return None
else:
if expand:
                # This branch indicates that the object passed as new_col
                # is a table of multiple columns rather than a single column.
table = new_col
if isinstance(self._parent, cudf.BaseIndex):
idx = self._parent._constructor_expanddim._from_data(table)
idx.names = None
return idx
else:
return self._parent._constructor_expanddim._from_data(
data=table, index=self._parent.index
)
elif isinstance(self._parent, cudf.Series):
if retain_index:
return cudf.Series(
new_col,
name=self._parent.name,
index=self._parent.index,
)
else:
return cudf.Series(new_col, name=self._parent.name)
elif isinstance(self._parent, cudf.BaseIndex):
return cudf.core.index.as_index(
new_col, name=self._parent.name
)
else:
return self._parent._mimic_inplace(new_col, inplace=False)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/timedelta.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from __future__ import annotations
import datetime
from typing import Any, Optional, Sequence, cast
import numpy as np
import pandas as pd
import pyarrow as pa
import cudf
from cudf import _lib as libcudf
from cudf._typing import ColumnBinaryOperand, DatetimeLikeScalar, Dtype
from cudf.api.types import is_scalar, is_timedelta64_dtype
from cudf.core.buffer import Buffer, acquire_spill_lock
from cudf.core.column import ColumnBase, column, string
from cudf.utils.dtypes import np_to_pa_dtype
from cudf.utils.utils import _all_bools_with_nulls
_dtype_to_format_conversion = {
"timedelta64[ns]": "%D days %H:%M:%S",
"timedelta64[us]": "%D days %H:%M:%S",
"timedelta64[ms]": "%D days %H:%M:%S",
"timedelta64[s]": "%D days %H:%M:%S",
}
_unit_to_nanoseconds_conversion = {
"ns": 1,
"us": 1_000,
"ms": 1_000_000,
"s": 1_000_000_000,
"m": 60_000_000_000,
"h": 3_600_000_000_000,
"D": 86_400_000_000_000,
}
class TimeDeltaColumn(ColumnBase):
"""
Parameters
----------
data : Buffer
The Timedelta values
dtype : np.dtype
The data type
size : int
Size of memory allocation.
    mask : Buffer, optional
The validity mask
offset : int
Data offset
null_count : int, optional
The number of null values.
If None, it is calculated automatically.
"""
_VALID_BINARY_OPERATIONS = {
"__eq__",
"__ne__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__add__",
"__sub__",
"__mul__",
"__mod__",
"__truediv__",
"__floordiv__",
"__radd__",
"__rsub__",
"__rmul__",
"__rmod__",
"__rtruediv__",
"__rfloordiv__",
}
def __init__(
self,
data: Buffer,
dtype: Dtype,
size: Optional[int] = None, # TODO: make non-optional
mask: Optional[Buffer] = None,
offset: int = 0,
null_count: Optional[int] = None,
):
dtype = cudf.dtype(dtype)
if data.size % dtype.itemsize:
raise ValueError("Buffer size must be divisible by element size")
if size is None:
size = data.size // dtype.itemsize
size = size - offset
super().__init__(
data,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
)
if self.dtype.type is not np.timedelta64:
raise TypeError(f"{self.dtype} is not a supported duration type")
self._time_unit, _ = np.datetime_data(self.dtype)
def __contains__(self, item: DatetimeLikeScalar) -> bool:
try:
item = np.timedelta64(item, self._time_unit)
except ValueError:
# If item cannot be converted to duration type
# np.timedelta64 raises ValueError, hence `item`
# cannot exist in `self`.
return False
return item.view("int64") in self.as_numerical
@property
def values(self):
"""
Return a CuPy representation of the TimeDeltaColumn.
"""
raise NotImplementedError(
"TimeDelta Arrays is not yet implemented in cudf"
)
@acquire_spill_lock()
def to_arrow(self) -> pa.Array:
mask = None
if self.nullable:
mask = pa.py_buffer(
self.mask_array_view(mode="read").copy_to_host()
)
data = pa.py_buffer(
self.as_numerical.data_array_view(mode="read").copy_to_host()
)
pa_dtype = np_to_pa_dtype(self.dtype)
return pa.Array.from_buffers(
type=pa_dtype,
length=len(self),
buffers=[mask, data],
null_count=self.null_count,
)
def to_pandas(
self, index=None, nullable: bool = False, **kwargs
) -> pd.Series:
# `copy=True` workaround until following issue is fixed:
# https://issues.apache.org/jira/browse/ARROW-9772
# Pandas only supports `timedelta64[ns]` dtype
# and conversion to this type is necessary to make
# arrow to pandas conversion happen for large values.
return pd.Series(
self.astype("timedelta64[ns]").to_arrow(),
copy=True,
dtype=self.dtype,
index=index,
)
def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase:
reflect, op = self._check_reflected_op(op)
other = self._wrap_binop_normalization(other)
if other is NotImplemented:
return NotImplemented
this: ColumnBinaryOperand = self
out_dtype = None
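        # Added note: out_dtype remains None for unsupported operand/dtype
        # combinations, in which case NotImplemented is returned below so the
        # reflected operation can be attempted on the other operand.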
if is_timedelta64_dtype(other.dtype):
# TODO: pandas will allow these operators to work but return false
# when comparing to non-timedelta dtypes. We should do the same.
if op in {
"__eq__",
"__ne__",
"__lt__",
"__gt__",
"__le__",
"__ge__",
"NULL_EQUALS",
}:
out_dtype = cudf.dtype(np.bool_)
elif op == "__mod__":
out_dtype = determine_out_dtype(self.dtype, other.dtype)
elif op in {"__truediv__", "__floordiv__"}:
common_dtype = determine_out_dtype(self.dtype, other.dtype)
out_dtype = np.float64 if op == "__truediv__" else np.int64
this = self.astype(common_dtype).astype(out_dtype)
if isinstance(other, cudf.Scalar):
if other.is_valid():
other = other.value.astype(common_dtype).astype(
out_dtype
)
else:
other = cudf.Scalar(None, out_dtype)
else:
other = other.astype(common_dtype).astype(out_dtype)
elif op in {"__add__", "__sub__"}:
out_dtype = determine_out_dtype(self.dtype, other.dtype)
elif other.dtype.kind in {"f", "i", "u"}:
if op in {"__mul__", "__mod__", "__truediv__", "__floordiv__"}:
out_dtype = self.dtype
elif op in {"__eq__", "NULL_EQUALS", "__ne__"}:
if isinstance(other, ColumnBase) and not isinstance(
other, TimeDeltaColumn
):
result = _all_bools_with_nulls(
self, other, bool_fill_value=op == "__ne__"
)
if cudf.get_option("mode.pandas_compatible"):
result = result.fillna(op == "__ne__")
return result
if out_dtype is None:
return NotImplemented
lhs, rhs = (other, this) if reflect else (this, other)
result = libcudf.binaryop.binaryop(lhs, rhs, op, out_dtype)
if cudf.get_option(
"mode.pandas_compatible"
) and out_dtype == cudf.dtype(np.bool_):
result = result.fillna(op == "__ne__")
return result
def normalize_binop_value(self, other) -> ColumnBinaryOperand:
if isinstance(other, (ColumnBase, cudf.Scalar)):
return other
tz_error_msg = (
"Cannot perform binary operation on timezone-naive columns"
" and timezone-aware timestamps."
)
if isinstance(other, pd.Timestamp):
if other.tz is not None:
raise NotImplementedError(tz_error_msg)
other = other.to_datetime64()
elif isinstance(other, pd.Timedelta):
other = other.to_timedelta64()
elif isinstance(other, datetime.timedelta):
other = np.timedelta64(other)
elif isinstance(other, datetime.datetime) and other.tzinfo is not None:
raise NotImplementedError(tz_error_msg)
if isinstance(other, np.timedelta64):
other_time_unit = cudf.utils.dtypes.get_time_unit(other)
if np.isnat(other):
return cudf.Scalar(None, dtype=self.dtype)
if other_time_unit not in {"s", "ms", "ns", "us"}:
common_dtype = "timedelta64[s]"
else:
common_dtype = determine_out_dtype(self.dtype, other.dtype)
return cudf.Scalar(other.astype(common_dtype))
elif np.isscalar(other):
return cudf.Scalar(other)
return NotImplemented
@property
def as_numerical(self) -> "cudf.core.column.NumericalColumn":
return cast(
"cudf.core.column.NumericalColumn",
column.build_column(
data=self.base_data,
dtype=np.int64,
mask=self.base_mask,
offset=self.offset,
size=self.size,
),
)
@property
def time_unit(self) -> str:
return self._time_unit
def fillna(
self,
fill_value: Any = None,
method: Optional[str] = None,
dtype: Optional[Dtype] = None,
) -> TimeDeltaColumn:
if fill_value is not None:
if cudf.utils.utils._isnat(fill_value):
return self.copy(deep=True)
col: ColumnBase = self
if is_scalar(fill_value):
if isinstance(fill_value, np.timedelta64):
dtype = determine_out_dtype(self.dtype, fill_value.dtype)
fill_value = fill_value.astype(dtype)
col = col.astype(dtype)
if not isinstance(fill_value, cudf.Scalar):
fill_value = cudf.Scalar(fill_value, dtype=dtype)
else:
fill_value = column.as_column(fill_value, nan_as_null=False)
return cast(TimeDeltaColumn, ColumnBase.fillna(col, fill_value))
else:
return super().fillna(method=method)
def as_numerical_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.NumericalColumn":
return cast(
"cudf.core.column.NumericalColumn", self.as_numerical.astype(dtype)
)
def as_datetime_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.DatetimeColumn":
raise TypeError(
f"cannot astype a timedelta from {self.dtype} to {dtype}"
)
def as_string_column(
self, dtype: Dtype, format=None, **kwargs
) -> "cudf.core.column.StringColumn":
if format is None:
format = _dtype_to_format_conversion.get(
self.dtype.name, "%D days %H:%M:%S"
)
if len(self) > 0:
return string._timedelta_to_str_typecast_functions[
cudf.dtype(self.dtype)
](self, format=format)
else:
return cast(
"cudf.core.column.StringColumn",
column.column_empty(0, dtype="object", masked=False),
)
def as_timedelta_column(self, dtype: Dtype, **kwargs) -> TimeDeltaColumn:
dtype = cudf.dtype(dtype)
if dtype == self.dtype:
return self
return libcudf.unary.cast(self, dtype=dtype)
def mean(self, skipna=None, dtype: Dtype = np.float64) -> pd.Timedelta:
return pd.Timedelta(
self.as_numerical.mean(skipna=skipna, dtype=dtype),
unit=self.time_unit,
)
def median(self, skipna: Optional[bool] = None) -> pd.Timedelta:
return pd.Timedelta(
self.as_numerical.median(skipna=skipna), unit=self.time_unit
)
def isin(self, values: Sequence) -> ColumnBase:
return cudf.core.tools.datetimes._isin_datetimelike(self, values)
def quantile(
self,
q: np.ndarray,
interpolation: str,
exact: bool,
return_scalar: bool,
) -> ColumnBase:
result = self.as_numerical.quantile(
q=q,
interpolation=interpolation,
exact=exact,
return_scalar=return_scalar,
)
if return_scalar:
return pd.Timedelta(result, unit=self.time_unit)
return result.astype(self.dtype)
def sum(
self,
skipna: Optional[bool] = None,
min_count: int = 0,
dtype: Optional[Dtype] = None,
) -> pd.Timedelta:
return pd.Timedelta(
# Since sum isn't overridden in Numerical[Base]Column, mypy only
# sees the signature from Reducible (which doesn't have the extra
# parameters from ColumnBase._reduce) so we have to ignore this.
self.as_numerical.sum( # type: ignore
skipna=skipna, min_count=min_count, dtype=dtype
),
unit=self.time_unit,
)
def std(
self,
skipna: Optional[bool] = None,
min_count: int = 0,
dtype: Dtype = np.float64,
ddof: int = 1,
) -> pd.Timedelta:
return pd.Timedelta(
self.as_numerical.std(
skipna=skipna, min_count=min_count, ddof=ddof, dtype=dtype
),
unit=self.time_unit,
)
def components(self, index=None) -> "cudf.DataFrame":
"""
Return a Dataframe of the components of the Timedeltas.
Returns
-------
DataFrame
Examples
--------
>>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='s'))
>>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,
... 3244334234], dtype='timedelta64[ms]')
>>> s
0 141 days 13:35:12.123
1 14 days 06:00:31.231
2 13000 days 10:12:48.712
3 0 days 00:35:35.656
4 37 days 13:12:14.234
dtype: timedelta64[ms]
>>> s.dt.components
days hours minutes seconds milliseconds microseconds nanoseconds
0 141 13 35 12 123 0 0
1 14 6 0 31 231 0 0
2 13000 10 12 48 712 0 0
3 0 0 35 35 656 0 0
4 37 13 12 14 234 0 0
""" # noqa: E501
return cudf.DataFrame(
data={
"days": self
// cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["D"], "ns")
),
"hours": (
self
% cudf.Scalar(
np.timedelta64(
_unit_to_nanoseconds_conversion["D"], "ns"
)
)
)
// cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["h"], "ns")
),
"minutes": (
self
% cudf.Scalar(
np.timedelta64(
_unit_to_nanoseconds_conversion["h"], "ns"
)
)
)
// cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["m"], "ns")
),
"seconds": (
self
% cudf.Scalar(
np.timedelta64(
_unit_to_nanoseconds_conversion["m"], "ns"
)
)
)
// cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["s"], "ns")
),
"milliseconds": (
self
% cudf.Scalar(
np.timedelta64(
_unit_to_nanoseconds_conversion["s"], "ns"
)
)
)
// cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["ms"], "ns")
),
"microseconds": (
self
% cudf.Scalar(
np.timedelta64(
_unit_to_nanoseconds_conversion["ms"], "ns"
)
)
)
// cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["us"], "ns")
),
"nanoseconds": (
self
% cudf.Scalar(
np.timedelta64(
_unit_to_nanoseconds_conversion["us"], "ns"
)
)
)
// cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["ns"], "ns")
),
},
index=index,
)
@property
def days(self) -> "cudf.core.column.NumericalColumn":
"""
Number of days for each element.
Returns
-------
NumericalColumn
"""
return self // cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["D"], "ns")
)
@property
def seconds(self) -> "cudf.core.column.NumericalColumn":
"""
Number of seconds (>= 0 and less than 1 day).
Returns
-------
NumericalColumn
"""
# This property must return the number of seconds (>= 0 and
# less than 1 day) for each element, hence first performing
# mod operation to remove the number of days and then performing
# division operation to extract the number of seconds.
return (
self
% cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["D"], "ns")
)
) // cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["s"], "ns")
)
@property
def microseconds(self) -> "cudf.core.column.NumericalColumn":
"""
Number of microseconds (>= 0 and less than 1 second).
Returns
-------
NumericalColumn
"""
# This property must return the number of microseconds (>= 0 and
# less than 1 second) for each element, hence first performing
# mod operation to remove the number of seconds and then performing
# division operation to extract the number of microseconds.
return (
self % np.timedelta64(_unit_to_nanoseconds_conversion["s"], "ns")
) // cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["us"], "ns")
)
@property
def nanoseconds(self) -> "cudf.core.column.NumericalColumn":
"""
Return the number of nanoseconds (n), where 0 <= n < 1 microsecond.
Returns
-------
NumericalColumn
"""
# This property must return the number of nanoseconds (>= 0 and
# less than 1 microsecond) for each element, hence first performing
# mod operation to remove the number of microseconds and then
# performing division operation to extract the number
# of nanoseconds.
return (
self
% cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["us"], "ns")
)
) // cudf.Scalar(
np.timedelta64(_unit_to_nanoseconds_conversion["ns"], "ns")
)
def determine_out_dtype(lhs_dtype: Dtype, rhs_dtype: Dtype) -> Dtype:
if np.can_cast(np.dtype(lhs_dtype), np.dtype(rhs_dtype)):
return rhs_dtype
elif np.can_cast(np.dtype(rhs_dtype), np.dtype(lhs_dtype)):
return lhs_dtype
else:
raise TypeError(f"Cannot type-cast {lhs_dtype} and {rhs_dtype}")
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/interval.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
from typing import Optional
import pandas as pd
import pyarrow as pa
import cudf
from cudf.api.types import is_categorical_dtype, is_interval_dtype
from cudf.core.column import StructColumn
from cudf.core.dtypes import IntervalDtype
class IntervalColumn(StructColumn):
def __init__(
self,
dtype,
mask=None,
size=None,
offset=0,
null_count=None,
children=(),
closed="right",
):
super().__init__(
data=None,
dtype=dtype,
mask=mask,
size=size,
offset=offset,
null_count=null_count,
children=children,
)
if closed in ["left", "right", "neither", "both"]:
self._closed = closed
else:
raise ValueError("closed value is not valid")
@property
def closed(self):
return self._closed
@classmethod
def from_arrow(cls, data):
new_col = super().from_arrow(data.storage)
size = len(data)
dtype = IntervalDtype.from_arrow(data.type)
mask = data.buffers()[0]
if mask is not None:
mask = cudf.utils.utils.pa_mask_buffer_to_mask(mask, len(data))
offset = data.offset
null_count = data.null_count
children = new_col.children
closed = dtype.closed
return IntervalColumn(
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
closed=closed,
)
def to_arrow(self):
typ = self.dtype.to_arrow()
struct_arrow = super().to_arrow()
if len(struct_arrow) == 0:
            # An empty struct arrow array has children with null types, so
            # rebuild it from the storage type to give the children concrete
            # non-null types.
struct_arrow = pa.array([], typ.storage_type)
return pa.ExtensionArray.from_storage(typ, struct_arrow)
@classmethod
def from_struct_column(cls, struct_column: StructColumn, closed="right"):
first_field_name = list(struct_column.dtype.fields.keys())[0]
return IntervalColumn(
size=struct_column.size,
dtype=IntervalDtype(
struct_column.dtype.fields[first_field_name], closed
),
mask=struct_column.base_mask,
offset=struct_column.offset,
null_count=struct_column.null_count,
children=struct_column.base_children,
closed=closed,
)
def copy(self, deep=True):
closed = self.closed
struct_copy = super().copy(deep=deep)
return IntervalColumn(
size=struct_copy.size,
dtype=IntervalDtype(struct_copy.dtype.fields["left"], closed),
mask=struct_copy.base_mask,
offset=struct_copy.offset,
null_count=struct_copy.null_count,
children=struct_copy.base_children,
closed=closed,
)
def as_interval_column(self, dtype, **kwargs):
if is_interval_dtype(dtype):
if is_categorical_dtype(self):
new_struct = self._get_decategorized_column()
return IntervalColumn.from_struct_column(new_struct)
if is_interval_dtype(dtype):
# a user can directly input the string `interval` as the dtype
# when creating an interval series or interval dataframe
if dtype == "interval":
dtype = IntervalDtype(
self.dtype.fields["left"], self.closed
)
children = self.children
return IntervalColumn(
size=self.size,
dtype=dtype,
mask=self.mask,
offset=self.offset,
null_count=self.null_count,
children=children,
closed=dtype.closed,
)
else:
raise ValueError("dtype must be IntervalDtype")
def to_pandas(
self, index: Optional[pd.Index] = None, **kwargs
) -> pd.Series:
# Note: This does not handle null values in the interval column.
# However, this exact sequence (calling __from_arrow__ on the output of
# self.to_arrow) is currently the best known way to convert interval
# types into pandas (trying to convert the underlying numerical columns
# directly is problematic), so we're stuck with this for now.
return pd.Series(
self.dtype.to_pandas().__from_arrow__(self.to_arrow()), index=index
)
def element_indexing(self, index: int):
result = super().element_indexing(index)
if cudf.get_option("mode.pandas_compatible"):
return pd.Interval(**result, closed=self._closed)
return {
field: value
for field, value in zip(self.dtype.fields, result.values())
}
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/numerical_base.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
"""Define an interface for columns that can perform numerical operations."""
from __future__ import annotations
from typing import Optional, cast
import numpy as np
import cudf
from cudf import _lib as libcudf
from cudf._typing import ScalarLike
from cudf.core.column import ColumnBase
from cudf.core.missing import NA
from cudf.core.mixins import Scannable
class NumericalBaseColumn(ColumnBase, Scannable):
"""A column composed of numerical data.
This class encodes a standard interface for different types of columns
containing numerical types of data. In particular, mathematical operations
that make sense whether a column is integral or real, fixed or floating
point, should be encoded here.
"""
_VALID_REDUCTIONS = {
"sum",
"product",
"sum_of_squares",
"mean",
"var",
"std",
}
_VALID_SCANS = {
"cumsum",
"cumprod",
"cummin",
"cummax",
}
def _can_return_nan(self, skipna: Optional[bool] = None) -> bool:
return not skipna and self.has_nulls()
def kurtosis(self, skipna: Optional[bool] = None) -> float:
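        # Added note: this is the adjusted Fisher-Pearson kurtosis estimate
        # (the same definition pandas uses),
        #   g2 = n*(n+1)/((n-1)*(n-2)*(n-3)) * sum((x - mean)**4) / var**2
        #        - 3*(n-1)**2 / ((n-2)*(n-3))
        # computed on the column after nulls/NaNs are dropped.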
skipna = True if skipna is None else skipna
if len(self) == 0 or self._can_return_nan(skipna=skipna):
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
self = self.nans_to_nulls().dropna() # type: ignore
if len(self) < 4:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
n = len(self)
miu = self.mean()
m4_numerator = ((self - miu) ** self.normalize_binop_value(4)).sum()
V = self.var()
if V == 0:
return 0
term_one_section_one = (n * (n + 1)) / ((n - 1) * (n - 2) * (n - 3))
term_one_section_two = m4_numerator / (V**2)
term_two = ((n - 1) ** 2) / ((n - 2) * (n - 3))
kurt = term_one_section_one * term_one_section_two - 3 * term_two
return kurt
def skew(self, skipna: Optional[bool] = None) -> ScalarLike:
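        # Added note: this is the bias-corrected sample skewness
        #   G1 = sqrt(n*(n-1))/(n-2) * m3 / m2**1.5
        # where m3 and m2 are the biased (ddof=0) third and second central
        # moments of the column after nulls/NaNs are dropped.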
skipna = True if skipna is None else skipna
if len(self) == 0 or self._can_return_nan(skipna=skipna):
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
self = self.nans_to_nulls().dropna() # type: ignore
if len(self) < 3:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
n = len(self)
miu = self.mean()
m3 = (((self - miu) ** self.normalize_binop_value(3)).sum()) / n
m2 = self.var(ddof=0)
if m2 == 0:
return 0
unbiased_coef = ((n * (n - 1)) ** 0.5) / (n - 2)
skew = unbiased_coef * m3 / (m2 ** (3 / 2))
return skew
def quantile(
self,
q: np.ndarray,
interpolation: str,
exact: bool,
return_scalar: bool,
) -> NumericalBaseColumn:
if np.logical_or(q < 0, q > 1).any():
raise ValueError(
"percentiles should all be in the interval [0, 1]"
)
# Beyond this point, q either being scalar or list-like
# will only have values in range [0, 1]
if len(self) == 0:
result = cast(
NumericalBaseColumn,
cudf.core.column.column_empty(
row_count=len(q), dtype=self.dtype, masked=True
),
)
else:
result = self._numeric_quantile(q, interpolation, exact)
if return_scalar:
scalar_result = result.element_indexing(0)
if interpolation in {"lower", "higher", "nearest"}:
try:
new_scalar = self.dtype.type(scalar_result)
scalar_result = (
new_scalar
if new_scalar == scalar_result
else scalar_result
)
except (TypeError, ValueError):
pass
return (
cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
if scalar_result is NA
else scalar_result
)
return result
def mean(
self,
skipna: Optional[bool] = None,
min_count: int = 0,
dtype=np.float64,
):
return self._reduce(
"mean", skipna=skipna, min_count=min_count, dtype=dtype
)
def var(
self,
skipna: Optional[bool] = None,
min_count: int = 0,
dtype=np.float64,
ddof=1,
):
return self._reduce(
"var", skipna=skipna, min_count=min_count, dtype=dtype, ddof=ddof
)
def std(
self,
skipna: Optional[bool] = None,
min_count: int = 0,
dtype=np.float64,
ddof=1,
):
return self._reduce(
"std", skipna=skipna, min_count=min_count, dtype=dtype, ddof=ddof
)
def median(self, skipna: Optional[bool] = None) -> NumericalBaseColumn:
skipna = True if skipna is None else skipna
if self._can_return_nan(skipna=skipna):
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
# enforce linear in case the default ever changes
return self.quantile(
np.array([0.5]),
interpolation="linear",
exact=True,
return_scalar=True,
)
def _numeric_quantile(
self, q: np.ndarray, interpolation: str, exact: bool
) -> NumericalBaseColumn:
# get sorted indices and exclude nulls
indices = libcudf.sort.order_by(
[self], [True], "first", stable=True
).slice(self.null_count, len(self))
return libcudf.quantiles.quantile(
self, q, interpolation, indices, exact
)
def cov(self, other: NumericalBaseColumn) -> float:
if (
len(self) == 0
or len(other) == 0
or (len(self) == 1 and len(other) == 1)
):
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
result = (self - self.mean()) * (other - other.mean())
cov_sample = result.sum() / (len(self) - 1)
return cov_sample
def corr(self, other: NumericalBaseColumn) -> float:
if len(self) == 0 or len(other) == 0:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
cov = self.cov(other)
lhs_std, rhs_std = self.std(), other.std()
if not cov or lhs_std == 0 or rhs_std == 0:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
return cov / lhs_std / rhs_std
    def round(
        self, decimals: int = 0, how: str = "half_even"
    ) -> NumericalBaseColumn:
        """Round the values in the Column to the given number of decimals."""
        if not cudf.api.types.is_integer(decimals):
            raise TypeError("Values in decimals must be integers")
        return libcudf.round.round(self, decimal_places=decimals, how=how)
def _scan(self, op: str) -> ColumnBase:
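        # Strip the "cum" prefix (e.g. "cumsum" -> "sum") to obtain the name
        # of the libcudf scan aggregation.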
return libcudf.reduce.scan(
op.replace("cum", ""), self, True
)._with_type_metadata(self.dtype)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/__init__.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
"""
isort: skip_file
"""
from cudf.core.column.categorical import CategoricalColumn
from cudf.core.column.column import (
ColumnBase,
arange,
as_column,
build_categorical_column,
build_column,
build_list_column,
build_struct_column,
column_empty,
column_empty_like,
column_empty_like_same_mask,
concat_columns,
deserialize_columns,
full,
serialize_columns,
)
from cudf.core.column.datetime import DatetimeColumn # noqa: F401
from cudf.core.column.datetime import DatetimeTZColumn # noqa: F401
from cudf.core.column.lists import ListColumn # noqa: F401
from cudf.core.column.numerical import NumericalColumn # noqa: F401
from cudf.core.column.string import StringColumn # noqa: F401
from cudf.core.column.struct import StructColumn # noqa: F401
from cudf.core.column.timedelta import TimeDeltaColumn # noqa: F401
from cudf.core.column.interval import IntervalColumn # noqa: F401
from cudf.core.column.decimal import ( # noqa: F401
Decimal32Column,
Decimal64Column,
Decimal128Column,
DecimalBaseColumn,
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/core
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/column/numerical.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
from __future__ import annotations
from typing import (
Any,
Callable,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
)
import cupy as cp
import numpy as np
import pandas as pd
import cudf
from cudf import _lib as libcudf
from cudf._lib.stream_compaction import drop_nulls
from cudf._lib.types import size_type_dtype
from cudf._typing import (
ColumnBinaryOperand,
ColumnLike,
Dtype,
DtypeObj,
ScalarLike,
)
from cudf.api.types import (
is_bool_dtype,
is_float_dtype,
is_integer,
is_integer_dtype,
is_scalar,
)
from cudf.core.buffer import Buffer, cuda_array_interface_wrapper
from cudf.core.column import (
ColumnBase,
as_column,
build_column,
column,
full,
string,
)
from cudf.core.dtypes import CategoricalDtype
from cudf.core.mixins import BinaryOperand
from cudf.utils.dtypes import (
NUMERIC_TYPES,
min_column_type,
min_signed_type,
np_dtypes_to_pandas_dtypes,
numeric_normalize_types,
)
from .numerical_base import NumericalBaseColumn
class NumericalColumn(NumericalBaseColumn):
"""
A Column object for Numeric types.
Parameters
----------
data : Buffer
dtype : np.dtype
The dtype associated with the data Buffer
mask : Buffer, optional
"""
_nan_count: Optional[int]
_VALID_BINARY_OPERATIONS = BinaryOperand._SUPPORTED_BINARY_OPERATIONS
def __init__(
self,
data: Buffer,
dtype: DtypeObj,
mask: Optional[Buffer] = None,
size: Optional[int] = None, # TODO: make this non-optional
offset: int = 0,
null_count: Optional[int] = None,
):
dtype = cudf.dtype(dtype)
if data.size % dtype.itemsize:
raise ValueError("Buffer size must be divisible by element size")
if size is None:
size = (data.size // dtype.itemsize) - offset
self._nan_count = None
super().__init__(
data,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
)
def _clear_cache(self):
super()._clear_cache()
self._nan_count = None
def __contains__(self, item: ScalarLike) -> bool:
"""
Returns True if column contains item, else False.
"""
        # Handles improper item types.
        # The cast check fails if item is None, hence the try/except handler.
try:
if np.can_cast(item, self.dtype):
item = self.dtype.type(item)
else:
return False
except (TypeError, ValueError):
return False
# TODO: Use `scalar`-based `contains` wrapper
return libcudf.search.contains(
self, column.as_column([item], dtype=self.dtype)
).any()
def indices_of(self, value: ScalarLike) -> NumericalColumn:
if isinstance(value, (bool, np.bool_)) and self.dtype.kind != "b":
raise ValueError(
f"Cannot use a {type(value).__name__} to find an index of "
f"a {self.dtype} Index."
)
if (
value is not None
and self.dtype.kind in {"c", "f"}
and np.isnan(value)
):
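            # NaN never compares equal to itself, so an equality-based search
            # would not find it; locate NaN positions directly with cupy.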
return column.as_column(
cp.argwhere(
cp.isnan(self.data_array_view(mode="read"))
).flatten(),
dtype=size_type_dtype,
)
else:
return super().indices_of(value)
def has_nulls(self, include_nan=False):
return bool(self.null_count != 0) or (
include_nan and bool(self.nan_count != 0)
)
def __setitem__(self, key: Any, value: Any):
"""
Set the value of ``self[key]`` to ``value``.
If ``value`` and ``self`` are of different types, ``value`` is coerced
to ``self.dtype``.
"""
# Normalize value to scalar/column
device_value = (
cudf.Scalar(
value,
dtype=self.dtype
if cudf._lib.scalar._is_null_host_scalar(value)
else None,
)
if is_scalar(value)
else as_column(value)
)
if not is_bool_dtype(self.dtype) and is_bool_dtype(device_value.dtype):
raise TypeError(f"Invalid value {value} for dtype {self.dtype}")
else:
device_value = device_value.astype(self.dtype)
out: Optional[ColumnBase] # If None, no need to perform mimic inplace.
if isinstance(key, slice):
out = self._scatter_by_slice(key, device_value)
else:
key = as_column(key)
if not isinstance(key, cudf.core.column.NumericalColumn):
raise ValueError(f"Invalid scatter map type {key.dtype}.")
out = self._scatter_by_column(key, device_value)
if out:
self._mimic_inplace(out, inplace=True)
@property
def __cuda_array_interface__(self) -> Mapping[str, Any]:
output = {
"shape": (len(self),),
"strides": (self.dtype.itemsize,),
"typestr": self.dtype.str,
"data": (self.data_ptr, False),
"version": 1,
}
if self.nullable and self.has_nulls():
# Create a simple Python object that exposes the
# `__cuda_array_interface__` attribute here since we need to modify
# some of the attributes from the numba device array
output["mask"] = cuda_array_interface_wrapper(
ptr=self.mask_ptr,
size=len(self),
owner=self.mask,
readonly=True,
typestr="<t1",
)
return output
def unary_operator(self, unaryop: Union[str, Callable]) -> ColumnBase:
if callable(unaryop):
return libcudf.transform.transform(self, unaryop)
unaryop = libcudf.unary.UnaryOp[unaryop.upper()]
return libcudf.unary.unary_operation(self, unaryop)
def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase:
int_float_dtype_mapping = {
np.int8: np.float32,
np.int16: np.float32,
np.int32: np.float32,
np.int64: np.float64,
np.uint8: np.float32,
np.uint16: np.float32,
np.uint32: np.float64,
np.uint64: np.float64,
np.bool_: np.float32,
}
if op in {"__truediv__", "__rtruediv__"}:
# Division with integer types results in a suitable float.
if truediv_type := int_float_dtype_mapping.get(self.dtype.type):
return self.astype(truediv_type)._binaryop(other, op)
reflect, op = self._check_reflected_op(op)
if (other := self._wrap_binop_normalization(other)) is NotImplemented:
return NotImplemented
out_dtype = self.dtype
if other is not None:
out_dtype = np.result_type(self.dtype, other.dtype)
if op in {"__mod__", "__floordiv__"}:
tmp = self if reflect else other
# Guard against division by zero for integers.
if (
(tmp.dtype.type in int_float_dtype_mapping)
and (tmp.dtype.type != np.bool_)
and (
(
(
np.isscalar(tmp)
or (
isinstance(tmp, cudf.Scalar)
# host to device copy
and tmp.is_valid()
)
)
and (0 == tmp)
)
or ((isinstance(tmp, NumericalColumn)) and (0 in tmp))
)
):
out_dtype = cudf.dtype("float64")
if op in {
"__lt__",
"__gt__",
"__le__",
"__ge__",
"__eq__",
"__ne__",
"NULL_EQUALS",
}:
out_dtype = "bool"
if op in {"__and__", "__or__", "__xor__"}:
if is_float_dtype(self.dtype) or is_float_dtype(other):
raise TypeError(
f"Operation 'bitwise {op[2:-2]}' not supported between "
f"{self.dtype.type.__name__} and "
f"{other.dtype.type.__name__}"
)
if is_bool_dtype(self.dtype) or is_bool_dtype(other):
out_dtype = "bool"
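        # Integer ** integer is dispatched to libcudf's INT_POW so the result
        # is computed with integer arithmetic rather than floating point.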
if (
op == "__pow__"
and is_integer_dtype(self.dtype)
and (is_integer(other) or is_integer_dtype(other.dtype))
):
op = "INT_POW"
lhs, rhs = (other, self) if reflect else (self, other)
return libcudf.binaryop.binaryop(lhs, rhs, op, out_dtype)
def nans_to_nulls(self: NumericalColumn) -> NumericalColumn:
# Only floats can contain nan.
if self.dtype.kind != "f" or self.nan_count == 0:
return self
newmask = libcudf.transform.nans_to_nulls(self)
return self.set_mask(newmask)
def normalize_binop_value(
self, other: ScalarLike
) -> Union[ColumnBase, cudf.Scalar]:
if isinstance(other, ColumnBase):
if not isinstance(other, NumericalColumn):
return NotImplemented
return other
if isinstance(other, cudf.Scalar):
if self.dtype == other.dtype:
return other
# expensive device-host transfer just to
# adjust the dtype
other = other.value
        # Try to match pandas and hence numpy. Deduce the common
        # dtype via the _value_ of other, and the dtype of self. TODO:
        # When NEP50 is accepted, this might need to be changed or
        # simplified.
# This is not at all simple:
# np.result_type(np.int64(0), np.uint8)
# => np.uint8
# np.result_type(np.asarray([0], dtype=np.int64), np.uint8)
# => np.int64
# np.promote_types(np.int64(0), np.uint8)
# => np.int64
# np.promote_types(np.asarray([0], dtype=np.int64).dtype, np.uint8)
# => np.int64
common_dtype = np.result_type(self.dtype, other)
if common_dtype.kind in {"b", "i", "u", "f"}:
if self.dtype.kind == "b":
common_dtype = min_signed_type(other)
return cudf.Scalar(other, dtype=common_dtype)
else:
return NotImplemented
def int2ip(self) -> "cudf.core.column.StringColumn":
if self.dtype != cudf.dtype("int64"):
raise TypeError("Only int64 type can be converted to ip")
return libcudf.string_casting.int2ip(self)
def as_string_column(
self, dtype: Dtype, format=None, **kwargs
) -> "cudf.core.column.StringColumn":
if len(self) > 0:
return string._numeric_to_str_typecast_functions[
cudf.dtype(self.dtype)
](self)
else:
return cast(
"cudf.core.column.StringColumn", as_column([], dtype="object")
)
def as_datetime_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.DatetimeColumn":
return cast(
"cudf.core.column.DatetimeColumn",
build_column(
data=self.astype("int64").base_data,
dtype=dtype,
mask=self.base_mask,
offset=self.offset,
size=self.size,
),
)
def as_timedelta_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.TimeDeltaColumn":
return cast(
"cudf.core.column.TimeDeltaColumn",
build_column(
data=self.astype("int64").base_data,
dtype=dtype,
mask=self.base_mask,
offset=self.offset,
size=self.size,
),
)
def as_decimal_column(
self, dtype: Dtype, **kwargs
) -> "cudf.core.column.DecimalBaseColumn":
return libcudf.unary.cast(self, dtype)
def as_numerical_column(self, dtype: Dtype, **kwargs) -> NumericalColumn:
dtype = cudf.dtype(dtype)
if dtype == self.dtype:
return self
return libcudf.unary.cast(self, dtype)
def all(self, skipna: bool = True) -> bool:
# If all entries are null the result is True, including when the column
# is empty.
result_col = self.nans_to_nulls() if skipna else self
if result_col.null_count == result_col.size:
return True
return libcudf.reduce.reduce("all", result_col, dtype=np.bool_)
def any(self, skipna: bool = True) -> bool:
# Early exit for fast cases.
result_col = self.nans_to_nulls() if skipna else self
if not skipna and result_col.has_nulls():
return True
elif skipna and result_col.null_count == result_col.size:
return False
return libcudf.reduce.reduce("any", result_col, dtype=np.bool_)
@property
def nan_count(self) -> int:
if self.dtype.kind != "f":
self._nan_count = 0
elif self._nan_count is None:
nan_col = libcudf.unary.is_nan(self)
self._nan_count = nan_col.sum()
return self._nan_count
def dropna(self, drop_nan: bool = False) -> NumericalColumn:
col = self.nans_to_nulls() if drop_nan else self
return drop_nulls([col])[0]
@property
def contains_na_entries(self) -> bool:
return (self.nan_count != 0) or (self.null_count != 0)
def _process_values_for_isin(
self, values: Sequence
) -> Tuple[ColumnBase, ColumnBase]:
lhs = cast("cudf.core.column.ColumnBase", self)
rhs = as_column(values, nan_as_null=False)
if isinstance(rhs, NumericalColumn):
rhs = rhs.astype(dtype=self.dtype)
if lhs.null_count == len(lhs):
lhs = lhs.astype(rhs.dtype)
elif rhs.null_count == len(rhs):
rhs = rhs.astype(lhs.dtype)
return lhs, rhs
def _can_return_nan(self, skipna: Optional[bool] = None) -> bool:
return not skipna and self.has_nulls(include_nan=True)
def _process_for_reduction(
self, skipna: Optional[bool] = None, min_count: int = 0
) -> Union[NumericalColumn, ScalarLike]:
skipna = True if skipna is None else skipna
if self._can_return_nan(skipna=skipna):
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
col = self.nans_to_nulls() if skipna else self
return super(NumericalColumn, col)._process_for_reduction(
skipna=skipna, min_count=min_count
)
def find_and_replace(
self,
to_replace: ColumnLike,
replacement: ColumnLike,
all_nan: bool = False,
) -> NumericalColumn:
"""
Return col with *to_replace* replaced with *value*.
"""
# If all of `to_replace`/`replacement` are `None`,
# dtype of `to_replace_col`/`replacement_col`
# is inferred as `string`, but this is a valid
# float64 column too, Hence we will need to type-cast
# to self.dtype.
to_replace_col = column.as_column(to_replace)
if to_replace_col.null_count == len(to_replace_col):
to_replace_col = to_replace_col.astype(self.dtype)
replacement_col = column.as_column(replacement)
if replacement_col.null_count == len(replacement_col):
replacement_col = replacement_col.astype(self.dtype)
if not isinstance(to_replace_col, type(replacement_col)):
raise TypeError(
f"to_replace and value should be of same types,"
f"got to_replace dtype: {to_replace_col.dtype} and "
f"value dtype: {replacement_col.dtype}"
)
if not isinstance(to_replace_col, NumericalColumn) and not isinstance(
replacement_col, NumericalColumn
):
return self.copy()
to_replace_col = _normalize_find_and_replace_input(
self.dtype, to_replace
)
if all_nan:
replacement_col = column.as_column(replacement, dtype=self.dtype)
else:
replacement_col = _normalize_find_and_replace_input(
self.dtype, replacement
)
if len(replacement_col) == 1 and len(to_replace_col) > 1:
replacement_col = column.as_column(
full(len(to_replace_col), replacement[0], self.dtype)
)
elif len(replacement_col) == 1 and len(to_replace_col) == 0:
return self.copy()
to_replace_col, replacement_col, replaced = numeric_normalize_types(
to_replace_col, replacement_col, self
)
df = cudf.DataFrame._from_data(
{"old": to_replace_col, "new": replacement_col}
)
df = df.drop_duplicates(subset=["old"], keep="last", ignore_index=True)
if df._data["old"].null_count == 1:
replaced = replaced.fillna(
df._data["new"]
.apply_boolean_mask(df._data["old"].isnull())
.element_indexing(0)
)
df = df.dropna(subset=["old"])
return libcudf.replace.replace(
replaced, df._data["old"], df._data["new"]
)
def fillna(
self,
fill_value: Any = None,
method: Optional[str] = None,
dtype: Optional[Dtype] = None,
fill_nan: bool = True,
) -> NumericalColumn:
"""
Fill null values with *fill_value*
"""
col = self.nans_to_nulls() if fill_nan else self
if col.null_count == 0:
return col
if method is not None:
return super(NumericalColumn, col).fillna(fill_value, method)
if fill_value is None:
raise ValueError("Must specify either 'fill_value' or 'method'")
if (
isinstance(fill_value, cudf.Scalar)
and fill_value.dtype == col.dtype
):
return super(NumericalColumn, col).fillna(fill_value, method)
if np.isscalar(fill_value):
# cast safely to the same dtype as self
fill_value_casted = col.dtype.type(fill_value)
if not np.isnan(fill_value) and (fill_value_casted != fill_value):
raise TypeError(
f"Cannot safely cast non-equivalent "
f"{type(fill_value).__name__} to {col.dtype.name}"
)
fill_value = cudf.Scalar(fill_value_casted)
else:
fill_value = column.as_column(fill_value, nan_as_null=False)
if is_integer_dtype(col.dtype):
# cast safely to the same dtype as self
if fill_value.dtype != col.dtype:
new_fill_value = fill_value.astype(col.dtype)
if not (new_fill_value == fill_value).all():
raise TypeError(
f"Cannot safely cast non-equivalent "
f"{col.dtype.type.__name__} to "
f"{cudf.dtype(dtype).type.__name__}"
)
fill_value = new_fill_value
else:
fill_value = fill_value.astype(col.dtype)
return super(NumericalColumn, col).fillna(fill_value, method)
def can_cast_safely(self, to_dtype: DtypeObj) -> bool:
"""
Returns true if all the values in self can be
safely cast to dtype
"""
if self.dtype.kind == to_dtype.kind:
if self.dtype <= to_dtype:
return True
else:
# Kinds are the same but to_dtype is smaller
if "float" in to_dtype.name:
finfo = np.finfo(to_dtype)
lower_, upper_ = finfo.min, finfo.max
elif "int" in to_dtype.name:
iinfo = np.iinfo(to_dtype)
lower_, upper_ = iinfo.min, iinfo.max
if self.dtype.kind == "f":
# Exclude 'np.inf', '-np.inf'
s = cudf.Series(self)
# TODO: replace np.inf with cudf scalar when
# https://github.com/rapidsai/cudf/pull/6297 merges
non_infs = s[~((s == np.inf) | (s == -np.inf))]
col = non_infs._column
else:
col = self
min_ = col.min()
# TODO: depending on implementation of cudf scalar and future
# refactor of min/max, change the test method
if np.isnan(min_):
# Column contains only infs
return True
return (min_ >= lower_) and (col.max() < upper_)
# want to cast int to uint
elif self.dtype.kind == "i" and to_dtype.kind == "u":
i_max_ = np.iinfo(self.dtype).max
u_max_ = np.iinfo(to_dtype).max
return (self.min() >= 0) and (
(i_max_ <= u_max_) or (self.max() < u_max_)
)
# want to cast uint to int
elif self.dtype.kind == "u" and to_dtype.kind == "i":
u_max_ = np.iinfo(self.dtype).max
i_max_ = np.iinfo(to_dtype).max
return (u_max_ <= i_max_) or (self.max() < i_max_)
# want to cast int to float
elif self.dtype.kind in {"i", "u"} and to_dtype.kind == "f":
info = np.finfo(to_dtype)
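            # A binary float can exactly represent every integer with
            # magnitude up to 2 ** (number of mantissa bits + 1).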
biggest_exact_int = 2 ** (info.nmant + 1)
if (self.min() >= -biggest_exact_int) and (
self.max() <= biggest_exact_int
):
return True
else:
filled = self.fillna(0)
return (
cudf.Series(filled).astype(to_dtype).astype(filled.dtype)
== cudf.Series(filled)
).all()
# want to cast float to int:
elif self.dtype.kind == "f" and to_dtype.kind in {"i", "u"}:
iinfo = np.iinfo(to_dtype)
min_, max_ = iinfo.min, iinfo.max
# best we can do is hope to catch it here and avoid compare
if (self.min() >= min_) and (self.max() <= max_):
filled = self.fillna(0, fill_nan=False)
return (cudf.Series(filled) % 1 == 0).all()
else:
return False
return False
def _with_type_metadata(self: ColumnBase, dtype: Dtype) -> ColumnBase:
if isinstance(dtype, CategoricalDtype):
return column.build_categorical_column(
categories=dtype.categories._values,
codes=build_column(self.base_data, dtype=self.dtype),
mask=self.base_mask,
ordered=dtype.ordered,
size=self.size,
offset=self.offset,
null_count=self.null_count,
)
return self
def to_pandas(
self,
index: Optional[pd.Index] = None,
nullable: bool = False,
**kwargs,
) -> pd.Series:
if nullable and self.dtype in np_dtypes_to_pandas_dtypes:
pandas_nullable_dtype = np_dtypes_to_pandas_dtypes[self.dtype]
arrow_array = self.to_arrow()
pandas_array = pandas_nullable_dtype.__from_arrow__(arrow_array)
pd_series = pd.Series(pandas_array, copy=False)
elif str(self.dtype) in NUMERIC_TYPES and not self.has_nulls():
pd_series = pd.Series(self.values_host, copy=False)
else:
pd_series = self.to_arrow().to_pandas(**kwargs)
if index is not None:
pd_series.index = index
return pd_series
def _reduction_result_dtype(self, reduction_op: str) -> Dtype:
col_dtype = self.dtype
if reduction_op in {"sum", "product"}:
col_dtype = (
col_dtype if col_dtype.kind == "f" else np.dtype("int64")
)
elif reduction_op == "sum_of_squares":
col_dtype = np.find_common_type([col_dtype], [np.dtype("uint64")])
return col_dtype
def _normalize_find_and_replace_input(
input_column_dtype: DtypeObj, col_to_normalize: Union[ColumnBase, list]
) -> ColumnBase:
normalized_column = column.as_column(
col_to_normalize,
dtype=input_column_dtype if len(col_to_normalize) <= 0 else None,
)
col_to_normalize_dtype = normalized_column.dtype
if isinstance(col_to_normalize, list):
if normalized_column.null_count == len(normalized_column):
normalized_column = normalized_column.astype(input_column_dtype)
col_to_normalize_dtype = min_column_type(
normalized_column, input_column_dtype
)
# Scalar case
if len(col_to_normalize) == 1:
if cudf._lib.scalar._is_null_host_scalar(col_to_normalize[0]):
return normalized_column.astype(input_column_dtype)
if np.isinf(col_to_normalize[0]):
return normalized_column
col_to_normalize_casted = np.array(col_to_normalize[0]).astype(
input_column_dtype
)
if not np.isnan(col_to_normalize_casted) and (
col_to_normalize_casted != col_to_normalize[0]
):
raise TypeError(
f"Cannot safely cast non-equivalent "
f"{col_to_normalize[0]} "
f"to {input_column_dtype.name}"
)
else:
col_to_normalize_dtype = input_column_dtype
elif hasattr(col_to_normalize, "dtype"):
col_to_normalize_dtype = col_to_normalize.dtype
else:
raise TypeError(f"Type {type(col_to_normalize)} not supported")
if (
col_to_normalize_dtype.kind == "f"
and input_column_dtype.kind in {"i", "u"}
) or (col_to_normalize_dtype.num > input_column_dtype.num):
raise TypeError(
f"Potentially unsafe cast for non-equivalent "
f"{col_to_normalize_dtype.name} "
f"to {input_column_dtype.name}"
)
return normalized_column.astype(input_column_dtype)
def digitize(
column: ColumnBase, bins: np.ndarray, right: bool = False
) -> ColumnBase:
"""Return the indices of the bins to which each value in column belongs.
Parameters
----------
column : Column
Input column.
bins : Column-like
1-D column-like object of bins with same type as `column`, should be
monotonically increasing.
right : bool
Indicates whether interval contains the right or left bin edge.
Returns
-------
A column containing the indices
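
    Examples
    --------
    Illustrative example (behaviour matches ``np.digitize``): with
    ``bins = [1, 3, 5]`` and input values ``[0, 2, 4, 6]``, the result is
    ``[0, 1, 2, 3]`` when ``right=False``.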
"""
if not column.dtype == bins.dtype:
raise ValueError(
"Digitize() expects bins and input column have the same dtype."
)
bin_col = as_column(bins, dtype=bins.dtype)
if bin_col.nullable:
raise ValueError("`bins` cannot contain null entries.")
return as_column(libcudf.sort.digitize([column], [bin_col], right))
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/benchmarks/bench_cudf_io.py
|
# Copyright (c) 2020, NVIDIA CORPORATION.
import glob
import io
import pytest
from conftest import option
import cudf
def get_dataset_dir():
if option.dataset_dir == "NONE":
return "cudf/benchmarks/cuio_data/datasets/"
return option.dataset_dir
@pytest.mark.parametrize("skiprows", [None, 100000, 200000])
@pytest.mark.parametrize("file_path", glob.glob(get_dataset_dir() + "avro_*"))
def bench_avro(benchmark, file_path, use_buffer, skiprows):
if use_buffer == "True":
with open(file_path, "rb") as f:
file_path = io.BytesIO(f.read())
benchmark(cudf.read_avro, file_path, skiprows=skiprows)
def get_dtypes(file_path):
if "_unsigned_int_" in file_path:
return ["uint8", "uint16", "uint32", "uint64"] * 16
elif "_int_" in file_path:
return ["int8", "int16", "int32", "int64"] * 16
elif "_float_" in file_path:
return ["float32", "float64"] * 32
elif "_str_" in file_path:
return ["str"] * 64
elif "_datetime64_" in file_path:
return [
"timestamp[s]",
"timestamp[ms]",
"timestamp[us]",
"timestamp[ns]",
] * 16
elif "_timedelta64_" in file_path:
return [
"timedelta64[s]",
"timedelta64[ms]",
"timedelta64[us]",
"timedelta64[ns]",
] * 16
elif "_bool_" in file_path:
return ["bool"] * 64
else:
raise TypeError("Unsupported dtype file")
@pytest.mark.parametrize("dtype", ["infer", "provide"])
@pytest.mark.parametrize("file_path", glob.glob(get_dataset_dir() + "json_*"))
def bench_json(benchmark, file_path, use_buffer, dtype):
if "bz2" in file_path:
compression = "bz2"
elif "gzip" in file_path:
compression = "gzip"
elif "infer" in file_path:
compression = "infer"
else:
raise TypeError("Unsupported compression type")
if dtype == "infer":
dtype = True
else:
dtype = get_dtypes(file_path)
if use_buffer == "True":
with open(file_path, "rb") as f:
file_path = io.BytesIO(f.read())
benchmark(
cudf.read_json,
file_path,
engine="cudf",
compression=compression,
lines=True,
orient="records",
dtype=dtype,
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/benchmarks/README.md
|
# cuDF benchmarks
## Overview
This directory contains source and configuration files for benchmarking
`cuDF`. The sources are currently intended to benchmark `cuDF` via the
python API, but this is not a requirement.
## Prerequisites
### Datasets
* Download datasets using the `get_datasets.sh` shell script. Currently only
avro and json datasets are available.
## Usage
### Python
* Run benchmarks using pytest as shown below
```
pytest cudf/benchmarks/
```
* cuIO benchmarks can read either directly from a file path or from in-memory
buffers; by default the file path option is enabled. To enable memory buffer
usage, pass `--use_buffer True` to pytest as shown below.
```
pytest --use_buffer True cudf/benchmarks/
```
* If the datasets directory is different from the default, it can be
provided with the option `--dataset_dir directory_path` as shown below.
```
pytest --dataset_dir directory_path cudf/benchmarks/
```
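* The options above can be combined in a single run, for example:
```
pytest --use_buffer True --dataset_dir directory_path cudf/benchmarks/
```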
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/benchmarks/conftest.py
|
# Copyright (c) 2020, NVIDIA CORPORATION.
option = None
def pytest_addoption(parser):
parser.addoption("--use_buffer", action="store", default=False)
parser.addoption("--dataset_dir", action="store", default="NONE")
def pytest_generate_tests(metafunc):
# This is called for every test. Only get/set command line arguments
# if the argument is specified in the list of test "fixturenames".
option_value = metafunc.config.option.use_buffer
if "use_buffer" in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("use_buffer", [option_value])
def pytest_configure(config):
global option
option = config.option
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/benchmarks/get_datasets.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import argparse
import os
import shutil
from collections import namedtuple
# Update url and dir where datasets need to be copied
Dataset = namedtuple("Dataset", ["url", "dir"])
datasets = {
"cuio_dataset": Dataset(
"https://data.rapids.ai/cudf/benchmark/avro_json_datasets.zip",
"cudf/benchmarks/cuio_data/",
),
}
def delete_dir(path):
if path == "/" or path == "~":
raise ValueError("Trying to delete root/home directory")
shutil.rmtree(path, ignore_errors=True)
def fetch_datasets(urls, dirs):
tmp_path = os.path.join(os.getcwd(), "tmp_benchmark/")
delete_dir(tmp_path)
os.mkdir(tmp_path)
for url, path in zip(urls, dirs):
path = os.path.join(os.getcwd(), path)
delete_dir(path)
os.mkdir(path)
os.system("wget " + url + " -P " + tmp_path)
os.system(
"unzip " + tmp_path + "/" + url.split("/")[-1] + " -d " + path
)
delete_dir(tmp_path)
urls = []
dirs = []
parser = argparse.ArgumentParser(
description="""
Fetches datasets as per given option.
By default it will download all available datasets
"""
)
parser.add_argument("-u", nargs=1, help="url of a dataset")
parser.add_argument(
"-d",
nargs=1,
help="path where downloaded dataset from given url will be unzipped",
)
parser.add_argument(
"--datasets",
nargs="+",
help="Currently supported datasets are: "
+ ", ".join(list(datasets.keys())),
)
args = parser.parse_args()
if (args.u is None and args.d is not None) or (
args.u is not None and args.d is None
):
raise ValueError(
"option -u and -d should be used together, can't use only one"
)
if args.u and args.d:
urls.append(args.u[0])
dirs.append(args.d[0])
if args.datasets:
for dataset in args.datasets:
urls.append(datasets[dataset].url)
dirs.append(datasets[dataset].dir)
if len(dirs) != len(set(dirs)):
raise ValueError("Duplicate destination paths are provided")
if len(urls) == 0:
for _, val in datasets.items():
urls.append(val.url)
dirs.append(val.dir)
fetch_datasets(urls, dirs)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/quantiles.pyx
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
from cudf.core.buffer import acquire_spill_lock
from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport move
from libcpp.vector cimport vector
from cudf._lib.column cimport Column
from cudf._lib.types cimport (
underlying_type_t_interpolation,
underlying_type_t_null_order,
underlying_type_t_order,
underlying_type_t_sorted,
)
from cudf._lib.types import Interpolation
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.quantiles cimport (
quantile as cpp_quantile,
quantiles as cpp_quantile_table,
)
from cudf._lib.cpp.table.table cimport table
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.cpp.types cimport interpolation, null_order, order, sorted
from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns
@acquire_spill_lock()
def quantile(
Column input,
object q,
str interp,
Column ordered_indices,
bool exact,
):
cdef column_view c_input = input.view()
cdef column_view c_ordered_indices = (
column_view() if ordered_indices is None
else ordered_indices.view()
)
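    # An empty (default-constructed) column_view makes libcudf use the input
    # values in their existing order, i.e. treat them as already sorted.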
cdef interpolation c_interp = <interpolation>(
<underlying_type_t_interpolation> Interpolation[interp.upper()]
)
cdef bool c_exact = exact
cdef vector[double] c_q
c_q.reserve(len(q))
for value in q:
c_q.push_back(value)
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_quantile(
c_input,
c_q,
c_interp,
c_ordered_indices,
c_exact,
)
)
return Column.from_unique_ptr(move(c_result))
def quantile_table(
list source_columns,
vector[double] q,
object interp,
object is_input_sorted,
list column_order,
list null_precedence,
):
cdef table_view c_input = table_view_from_columns(source_columns)
cdef vector[double] c_q = q
cdef interpolation c_interp = <interpolation>(
<underlying_type_t_interpolation> interp
)
cdef sorted c_is_input_sorted = <sorted>(
<underlying_type_t_sorted> is_input_sorted
)
cdef vector[order] c_column_order
cdef vector[null_order] c_null_precedence
c_column_order.reserve(len(column_order))
c_null_precedence.reserve(len(null_precedence))
for value in column_order:
c_column_order.push_back(
<order>(<underlying_type_t_order> value)
)
for value in null_precedence:
c_null_precedence.push_back(
<null_order>(<underlying_type_t_null_order> value)
)
cdef unique_ptr[table] c_result
with nogil:
c_result = move(
cpp_quantile_table(
c_input,
c_q,
c_interp,
c_is_input_sorted,
c_column_order,
c_null_precedence,
)
)
return columns_from_unique_ptr(move(c_result))
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/merge.pyx
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport move
from libcpp.vector cimport vector
cimport cudf._lib.cpp.types as libcudf_types
from cudf._lib.cpp.merge cimport merge as cpp_merge
from cudf._lib.cpp.table.table cimport table
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns
def merge_sorted(
list input_columns,
list key_columns_indices,
bool ascending=True,
str na_position="last",
):
"""Merge multiple lists of lexicographically sorted columns into one list
of sorted columns. `input_columns` is a list of lists of columns to be
merged.
"""
cdef vector[libcudf_types.size_type] c_column_keys = key_columns_indices
cdef vector[table_view] c_input_tables
cdef vector[libcudf_types.order] c_column_order
cdef vector[libcudf_types.null_order] c_null_precedence
c_input_tables.reserve(len(input_columns))
for source_columns in input_columns:
c_input_tables.push_back(
table_view_from_columns(source_columns))
num_keys = len(key_columns_indices)
cdef libcudf_types.order column_order = (
libcudf_types.order.ASCENDING if ascending
else libcudf_types.order.DESCENDING
)
c_column_order = vector[libcudf_types.order](num_keys, column_order)
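    # The comparison order is reversed for descending merges, so flip the
    # requested null position to keep nulls where the caller asked.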
if not ascending:
na_position = "last" if na_position == "first" else "first"
cdef libcudf_types.null_order null_precedence = (
libcudf_types.null_order.BEFORE if na_position == "first"
else libcudf_types.null_order.AFTER
)
c_null_precedence = vector[libcudf_types.null_order](
num_keys,
null_precedence
)
# Perform sorted merge operation
cdef unique_ptr[table] c_result
with nogil:
c_result = move(
cpp_merge(
c_input_tables,
c_column_keys,
c_column_order,
c_null_precedence,
)
)
return columns_from_unique_ptr(move(c_result))
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/null_mask.pyx
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from enum import Enum
from rmm._lib.device_buffer cimport DeviceBuffer, device_buffer
from cudf.core.buffer import acquire_spill_lock, as_buffer
from libcpp.memory cimport make_unique, unique_ptr
from libcpp.pair cimport pair
from libcpp.utility cimport move
from cudf._lib.column cimport Column
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.null_mask cimport (
bitmask_allocation_size_bytes as cpp_bitmask_allocation_size_bytes,
bitmask_and as cpp_bitmask_and,
bitmask_or as cpp_bitmask_or,
copy_bitmask as cpp_copy_bitmask,
create_null_mask as cpp_create_null_mask,
underlying_type_t_mask_state,
)
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.cpp.types cimport mask_state, size_type
from cudf._lib.utils cimport table_view_from_columns
class MaskState(Enum):
"""
Enum for null mask creation state
"""
UNALLOCATED = <underlying_type_t_mask_state> mask_state.UNALLOCATED
UNINITIALIZED = <underlying_type_t_mask_state> mask_state.UNINITIALIZED
ALL_VALID = <underlying_type_t_mask_state> mask_state.ALL_VALID
ALL_NULL = <underlying_type_t_mask_state> mask_state.ALL_NULL
@acquire_spill_lock()
def copy_bitmask(Column col):
"""
Copies column's validity mask buffer into a new buffer, shifting by the
offset if nonzero
"""
if col.base_mask is None:
return None
cdef column_view col_view = col.view()
cdef device_buffer db
cdef unique_ptr[device_buffer] up_db
with nogil:
db = move(cpp_copy_bitmask(col_view))
up_db = move(make_unique[device_buffer](move(db)))
rmm_db = DeviceBuffer.c_from_unique_ptr(move(up_db))
buf = as_buffer(rmm_db)
return buf
def bitmask_allocation_size_bytes(size_type num_bits):
"""
Given a size, calculates the number of bytes that should be allocated for a
column validity mask
"""
cdef size_t output_size
with nogil:
output_size = cpp_bitmask_allocation_size_bytes(num_bits)
return output_size
def create_null_mask(size_type size, state=MaskState.UNINITIALIZED):
"""
Given a size and a mask state, allocate a mask that can properly represent
the given size with the given mask state
Parameters
----------
size : int
Number of elements the mask needs to be able to represent
state : ``MaskState``, default ``MaskState.UNINITIALIZED``
State the null mask should be created in
"""
if not isinstance(state, MaskState):
raise TypeError(
"`state` is required to be of type `MaskState`, got "
+ (type(state).__name__)
)
cdef device_buffer db
cdef unique_ptr[device_buffer] up_db
cdef mask_state c_mask_state = <mask_state>(
<underlying_type_t_mask_state>(state.value)
)
with nogil:
db = move(cpp_create_null_mask(size, c_mask_state))
up_db = move(make_unique[device_buffer](move(db)))
rmm_db = DeviceBuffer.c_from_unique_ptr(move(up_db))
buf = as_buffer(rmm_db)
return buf
@acquire_spill_lock()
def bitmask_and(columns: list):
cdef table_view c_view = table_view_from_columns(columns)
cdef pair[device_buffer, size_type] c_result
cdef unique_ptr[device_buffer] up_db
with nogil:
c_result = move(cpp_bitmask_and(c_view))
up_db = move(make_unique[device_buffer](move(c_result.first)))
dbuf = DeviceBuffer.c_from_unique_ptr(move(up_db))
buf = as_buffer(dbuf)
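    # The second element of the pair is the count of unset bits, i.e. the
    # null count implied by the merged mask.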
return buf, c_result.second
@acquire_spill_lock()
def bitmask_or(columns: list):
cdef table_view c_view = table_view_from_columns(columns)
cdef pair[device_buffer, size_type] c_result
cdef unique_ptr[device_buffer] up_db
with nogil:
c_result = move(cpp_bitmask_or(c_view))
up_db = move(make_unique[device_buffer](move(c_result.first)))
dbuf = DeviceBuffer.c_from_unique_ptr(move(up_db))
buf = as_buffer(dbuf)
return buf, c_result.second
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/string_casting.pyx
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
from cudf._lib.column cimport Column
from cudf._lib.scalar import as_device_scalar
from cudf._lib.scalar cimport DeviceScalar
from cudf._lib.types import SUPPORTED_NUMPY_TO_LIBCUDF_TYPES
from libcpp.memory cimport unique_ptr
from libcpp.string cimport string
from libcpp.utility cimport move
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.scalar.scalar cimport string_scalar
from cudf._lib.cpp.strings.convert.convert_booleans cimport (
from_booleans as cpp_from_booleans,
to_booleans as cpp_to_booleans,
)
from cudf._lib.cpp.strings.convert.convert_datetime cimport (
from_timestamps as cpp_from_timestamps,
is_timestamp as cpp_is_timestamp,
to_timestamps as cpp_to_timestamps,
)
from cudf._lib.cpp.strings.convert.convert_durations cimport (
from_durations as cpp_from_durations,
to_durations as cpp_to_durations,
)
from cudf._lib.cpp.strings.convert.convert_floats cimport (
from_floats as cpp_from_floats,
to_floats as cpp_to_floats,
)
from cudf._lib.cpp.strings.convert.convert_integers cimport (
from_integers as cpp_from_integers,
hex_to_integers as cpp_hex_to_integers,
integers_to_hex as cpp_integers_to_hex,
is_hex as cpp_is_hex,
to_integers as cpp_to_integers,
)
from cudf._lib.cpp.strings.convert.convert_ipv4 cimport (
integers_to_ipv4 as cpp_integers_to_ipv4,
ipv4_to_integers as cpp_ipv4_to_integers,
is_ipv4 as cpp_is_ipv4,
)
from cudf._lib.cpp.types cimport data_type, type_id
from cudf._lib.types cimport underlying_type_t_type_id
import cudf
def floating_to_string(Column input_col):
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_floats(
input_column_view))
return Column.from_unique_ptr(move(c_result))
def string_to_floating(Column input_col, object out_type):
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[out_type]
)
)
cdef data_type c_out_type = data_type(tid)
with nogil:
c_result = move(
cpp_to_floats(
input_column_view,
c_out_type))
return Column.from_unique_ptr(move(c_result))
def dtos(Column input_col):
"""
Converting/Casting input column of type double to string column
Parameters
----------
input_col : input column of type double
Returns
-------
A Column with double values cast to string
"""
return floating_to_string(input_col)
def stod(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to double
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to double
"""
return string_to_floating(input_col, cudf.dtype("float64"))
def ftos(Column input_col):
"""
Converting/Casting input column of type float to string column
Parameters
----------
    input_col : input column of type float
Returns
-------
A Column with float values cast to string
"""
return floating_to_string(input_col)
def stof(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to float
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to float
"""
return string_to_floating(input_col, cudf.dtype("float32"))
def integer_to_string(Column input_col):
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_integers(
input_column_view))
return Column.from_unique_ptr(move(c_result))
def string_to_integer(Column input_col, object out_type):
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[out_type]
)
)
cdef data_type c_out_type = data_type(tid)
with nogil:
c_result = move(
cpp_to_integers(
input_column_view,
c_out_type))
return Column.from_unique_ptr(move(c_result))
def i8tos(Column input_col):
"""
Converting/Casting input column of type int8 to string column
Parameters
----------
input_col : input column of type int8
Returns
-------
A Column with int8 values cast to string
"""
return integer_to_string(input_col)
def stoi8(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to int8
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to int8
"""
return string_to_integer(input_col, cudf.dtype("int8"))
def i16tos(Column input_col):
"""
Converting/Casting input column of type int16 to string column
Parameters
----------
input_col : input column of type int16
Returns
-------
A Column with int16 values cast to string
"""
return integer_to_string(input_col)
def stoi16(Column input_col):
"""
Converting/Casting input column of type string to int16
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to int16
"""
return string_to_integer(input_col, cudf.dtype("int16"))
def itos(Column input_col):
"""
Converting/Casting input column of type int32 to string column
Parameters
----------
input_col : input column of type int32
Returns
-------
A Column with int32 values cast to string
"""
return integer_to_string(input_col)
def stoi(Column input_col):
"""
Converting/Casting input column of type string to int32
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to int32
"""
return string_to_integer(input_col, cudf.dtype("int32"))
def ltos(Column input_col):
"""
Converting/Casting input column of type int64 to string column
Parameters
----------
input_col : input column of type int64
Returns
-------
A Column with int64 values cast to string
"""
return integer_to_string(input_col)
def stol(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to int64
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to int64
"""
return string_to_integer(input_col, cudf.dtype("int64"))
def ui8tos(Column input_col):
"""
Converting/Casting input column of type uint8 to string column
Parameters
----------
input_col : input column of type uint8
Returns
-------
A Column with uint8 values cast to string
"""
return integer_to_string(input_col)
def stoui8(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to uint8
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to uint8
"""
return string_to_integer(input_col, cudf.dtype("uint8"))
def ui16tos(Column input_col):
"""
Converting/Casting input column of type uint16 to string column
Parameters
----------
input_col : input column of type uint16
Returns
-------
A Column with uint16 values cast to string
"""
return integer_to_string(input_col)
def stoui16(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to uint16
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to uint16
"""
return string_to_integer(input_col, cudf.dtype("uint16"))
def uitos(Column input_col):
"""
Converting/Casting input column of type uint32 to string column
Parameters
----------
input_col : input column of type uint32
Returns
-------
A Column with uint32 values cast to string
"""
return integer_to_string(input_col)
def stoui(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to uint32
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to uint32
"""
return string_to_integer(input_col, cudf.dtype("uint32"))
def ultos(Column input_col):
"""
Converting/Casting input column of type uint64 to string column
Parameters
----------
input_col : input column of type uint64
Returns
-------
A Column with uint64 values cast to string
"""
return integer_to_string(input_col)
def stoul(Column input_col, **kwargs):
"""
Converting/Casting input column of type string to uint64
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with strings cast to uint64
"""
return string_to_integer(input_col, cudf.dtype("uint64"))
def _to_booleans(Column input_col, object string_true="True"):
"""
Converting/Casting input column of type string to boolean column
Parameters
----------
input_col : input column of type string
string_true : string that represents True
Returns
-------
A Column with string values cast to boolean
"""
cdef DeviceScalar str_true = as_device_scalar(string_true)
cdef column_view input_column_view = input_col.view()
cdef const string_scalar* string_scalar_true = <const string_scalar*>(
str_true.get_raw_ptr())
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_to_booleans(
input_column_view,
string_scalar_true[0]))
return Column.from_unique_ptr(move(c_result))
def to_booleans(Column input_col, **kwargs):
return _to_booleans(input_col)
def _from_booleans(
Column input_col,
object string_true="True",
object string_false="False"):
"""
Converting/Casting input column of type boolean to string column
Parameters
----------
input_col : input column of type boolean
string_true : string that represents True
string_false : string that represents False
Returns
-------
A Column with boolean values cast to string
"""
cdef DeviceScalar str_true = as_device_scalar(string_true)
cdef DeviceScalar str_false = as_device_scalar(string_false)
cdef column_view input_column_view = input_col.view()
cdef const string_scalar* string_scalar_true = <const string_scalar*>(
str_true.get_raw_ptr())
cdef const string_scalar* string_scalar_false = <const string_scalar*>(
str_false.get_raw_ptr())
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_booleans(
input_column_view,
string_scalar_true[0],
string_scalar_false[0]))
return Column.from_unique_ptr(move(c_result))
def from_booleans(Column input_col):
return _from_booleans(input_col)
def int2timestamp(
Column input_col,
str format,
Column names):
"""
Converting/Casting input date-time column to string
column with specified format
Parameters
----------
input_col : input column of type timestamp in integer format
format : The string specifying output format
names : The string names to use for weekdays ("%a", "%A") and
months ("%b", "%B")
Returns
-------
A Column with date-time represented in string format
"""
cdef column_view input_column_view = input_col.view()
cdef string c_timestamp_format = format.encode("UTF-8")
cdef column_view input_strings_names = names.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_timestamps(
input_column_view,
c_timestamp_format,
input_strings_names))
return Column.from_unique_ptr(move(c_result))
def timestamp2int(Column input_col, dtype, format):
"""
Converting/Casting input string column to date-time column with specified
timestamp_format
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with string represented in date-time format
"""
cdef column_view input_column_view = input_col.view()
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[dtype]
)
)
cdef data_type out_type = data_type(tid)
cdef string c_timestamp_format = format.encode('UTF-8')
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_to_timestamps(
input_column_view,
out_type,
c_timestamp_format))
return Column.from_unique_ptr(move(c_result))
def istimestamp(
Column input_col,
object format,
**kwargs):
"""
Check input string column matches the specified timestamp format
Parameters
----------
input_col : input column of type string
format : format string of timestamp specifiers
Returns
-------
A Column of boolean values identifying strings that matched the format.
"""
if input_col.size == 0:
return cudf.core.column.as_column([], dtype=kwargs.get('dtype'))
cdef column_view input_column_view = input_col.view()
cdef string c_timestamp_format = <string>str(format).encode('UTF-8')
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_is_timestamp(
input_column_view,
c_timestamp_format))
return Column.from_unique_ptr(move(c_result))
def timedelta2int(Column input_col, dtype, format):
"""
Converting/Casting input string column to TimeDelta column with specified
format
Parameters
----------
input_col : input column of type string
Returns
-------
A Column with string represented in TimeDelta format
"""
cdef column_view input_column_view = input_col.view()
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[dtype]
)
)
cdef data_type out_type = data_type(tid)
cdef string c_duration_format = format.encode('UTF-8')
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_to_durations(
input_column_view,
out_type,
c_duration_format))
return Column.from_unique_ptr(move(c_result))
def int2timedelta(
Column input_col,
**kwargs):
"""
Converting/Casting input Timedelta column to string
column with specified format
Parameters
----------
input_col : input column of type Timedelta in integer format
Returns
-------
A Column with Timedelta represented in string format
"""
cdef column_view input_column_view = input_col.view()
cdef string c_duration_format = kwargs.get(
'format', "%D days %H:%M:%S").encode('UTF-8')
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_from_durations(
input_column_view,
c_duration_format))
return Column.from_unique_ptr(move(c_result))
def int2ip(Column input_col, **kwargs):
"""
Converting/Casting integer column to string column in ipv4 format
Parameters
----------
input_col : input integer column
Returns
-------
A Column with integer represented in string ipv4 format
"""
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_integers_to_ipv4(input_column_view))
return Column.from_unique_ptr(move(c_result))
def ip2int(Column input_col, **kwargs):
"""
Converting string ipv4 column to integer column
Parameters
----------
input_col : input string column
Returns
-------
A Column with ipv4 represented as integer
"""
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_ipv4_to_integers(input_column_view))
return Column.from_unique_ptr(move(c_result))
def is_ipv4(Column source_strings):
"""
Returns a Column of boolean values with True for `source_strings`
that have strings in IPv4 format. This format is nnn.nnn.nnn.nnn
where nnn is integer digits in [0,255].
"""
cdef unique_ptr[column] c_result
cdef column_view source_view = source_strings.view()
with nogil:
c_result = move(cpp_is_ipv4(
source_view
))
return Column.from_unique_ptr(move(c_result))
def htoi(Column input_col, **kwargs):
"""
Converting input column of type string having hex values
to integer of out_type
Parameters
----------
input_col : input column of type string
    dtype : The type of integer column expected (default int64)
Returns
-------
A Column of integers parsed from hexadecimal string values.
"""
cdef column_view input_column_view = input_col.view()
cdef type_id tid = <type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[
kwargs.get('dtype', cudf.dtype("int64"))
]
)
)
cdef data_type c_out_type = data_type(tid)
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_hex_to_integers(input_column_view,
c_out_type))
return Column.from_unique_ptr(move(c_result))
def is_hex(Column source_strings):
"""
Returns a Column of boolean values with True for `source_strings`
that have hex characters.
"""
cdef unique_ptr[column] c_result
cdef column_view source_view = source_strings.view()
with nogil:
c_result = move(cpp_is_hex(
source_view
))
return Column.from_unique_ptr(move(c_result))
def itoh(Column input_col):
"""
Converting input column of type integer to a string
column with hexadecimal character digits.
Parameters
----------
input_col : input column of type integer
Returns
-------
A Column of strings with hexadecimal characters.
"""
cdef column_view input_column_view = input_col.view()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(
cpp_integers_to_hex(input_column_view))
return Column.from_unique_ptr(move(c_result))
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/expressions.pyx
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from enum import Enum
from cython.operator cimport dereference
from libc.stdint cimport int32_t, int64_t
from libcpp.memory cimport make_unique, unique_ptr
from libcpp.string cimport string
from libcpp.utility cimport move
from cudf._lib.cpp cimport expressions as libcudf_exp
from cudf._lib.cpp.scalar.scalar cimport numeric_scalar, string_scalar
from cudf._lib.cpp.types cimport size_type
# Necessary for proper casting, see below.
ctypedef int32_t underlying_type_ast_operator
# Aliases for simplicity
ctypedef unique_ptr[libcudf_exp.expression] expression_ptr
class ASTOperator(Enum):
ADD = libcudf_exp.ast_operator.ADD
SUB = libcudf_exp.ast_operator.SUB
MUL = libcudf_exp.ast_operator.MUL
DIV = libcudf_exp.ast_operator.DIV
TRUE_DIV = libcudf_exp.ast_operator.TRUE_DIV
FLOOR_DIV = libcudf_exp.ast_operator.FLOOR_DIV
MOD = libcudf_exp.ast_operator.MOD
PYMOD = libcudf_exp.ast_operator.PYMOD
POW = libcudf_exp.ast_operator.POW
EQUAL = libcudf_exp.ast_operator.EQUAL
NULL_EQUAL = libcudf_exp.ast_operator.NULL_EQUAL
NOT_EQUAL = libcudf_exp.ast_operator.NOT_EQUAL
LESS = libcudf_exp.ast_operator.LESS
GREATER = libcudf_exp.ast_operator.GREATER
LESS_EQUAL = libcudf_exp.ast_operator.LESS_EQUAL
GREATER_EQUAL = libcudf_exp.ast_operator.GREATER_EQUAL
BITWISE_AND = libcudf_exp.ast_operator.BITWISE_AND
BITWISE_OR = libcudf_exp.ast_operator.BITWISE_OR
BITWISE_XOR = libcudf_exp.ast_operator.BITWISE_XOR
LOGICAL_AND = libcudf_exp.ast_operator.LOGICAL_AND
NULL_LOGICAL_AND = libcudf_exp.ast_operator.NULL_LOGICAL_AND
LOGICAL_OR = libcudf_exp.ast_operator.LOGICAL_OR
NULL_LOGICAL_OR = libcudf_exp.ast_operator.NULL_LOGICAL_OR
# Unary operators
IDENTITY = libcudf_exp.ast_operator.IDENTITY
IS_NULL = libcudf_exp.ast_operator.IS_NULL
SIN = libcudf_exp.ast_operator.SIN
COS = libcudf_exp.ast_operator.COS
TAN = libcudf_exp.ast_operator.TAN
ARCSIN = libcudf_exp.ast_operator.ARCSIN
ARCCOS = libcudf_exp.ast_operator.ARCCOS
ARCTAN = libcudf_exp.ast_operator.ARCTAN
SINH = libcudf_exp.ast_operator.SINH
COSH = libcudf_exp.ast_operator.COSH
TANH = libcudf_exp.ast_operator.TANH
ARCSINH = libcudf_exp.ast_operator.ARCSINH
ARCCOSH = libcudf_exp.ast_operator.ARCCOSH
ARCTANH = libcudf_exp.ast_operator.ARCTANH
EXP = libcudf_exp.ast_operator.EXP
LOG = libcudf_exp.ast_operator.LOG
SQRT = libcudf_exp.ast_operator.SQRT
CBRT = libcudf_exp.ast_operator.CBRT
CEIL = libcudf_exp.ast_operator.CEIL
FLOOR = libcudf_exp.ast_operator.FLOOR
ABS = libcudf_exp.ast_operator.ABS
RINT = libcudf_exp.ast_operator.RINT
BIT_INVERT = libcudf_exp.ast_operator.BIT_INVERT
NOT = libcudf_exp.ast_operator.NOT
class TableReference(Enum):
LEFT = libcudf_exp.table_reference.LEFT
RIGHT = libcudf_exp.table_reference.RIGHT
# Note that Literal currently supports only numeric and string scalars.
# libcudf expressions don't support other scalar types yet, so this isn't
# restrictive at the moment.
cdef class Literal(Expression):
def __cinit__(self, value):
if isinstance(value, int):
self.c_scalar.reset(new numeric_scalar[int64_t](value, True))
self.c_obj = <expression_ptr> move(make_unique[libcudf_exp.literal](
<numeric_scalar[int64_t] &>dereference(self.c_scalar)
))
elif isinstance(value, float):
self.c_scalar.reset(new numeric_scalar[double](value, True))
self.c_obj = <expression_ptr> move(make_unique[libcudf_exp.literal](
<numeric_scalar[double] &>dereference(self.c_scalar)
))
elif isinstance(value, str):
self.c_scalar.reset(new string_scalar(value.encode(), True))
self.c_obj = <expression_ptr> move(make_unique[libcudf_exp.literal](
<string_scalar &>dereference(self.c_scalar)
))
cdef class ColumnReference(Expression):
def __cinit__(self, size_type index):
self.c_obj = <expression_ptr>move(make_unique[libcudf_exp.column_reference](
index
))
cdef class Operation(Expression):
def __cinit__(self, op, Expression left, Expression right=None):
cdef libcudf_exp.ast_operator op_value = <libcudf_exp.ast_operator>(
<underlying_type_ast_operator> op.value
)
if right is None:
self.c_obj = <expression_ptr> move(make_unique[libcudf_exp.operation](
op_value, dereference(left.c_obj)
))
else:
self.c_obj = <expression_ptr> move(make_unique[libcudf_exp.operation](
op_value, dereference(left.c_obj), dereference(right.c_obj)
))
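# A minimal sketch of how these wrappers compose (hypothetical usage; the
# resulting expression tree is what gets handed to libcudf kernels such as
# those behind DataFrame.eval and conditional joins):
#     expr = Operation(ASTOperator.ADD, ColumnReference(0), Literal(1))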
cdef class ColumnNameReference(Expression):
def __cinit__(self, string name):
self.c_obj = <expression_ptr> \
move(make_unique[libcudf_exp.column_name_reference](name))
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/parquet.pyx
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
# cython: boundscheck = False
import io
import pyarrow as pa
import cudf
from cudf.core.buffer import acquire_spill_lock
try:
import ujson as json
except ImportError:
import json
import numpy as np
from cython.operator cimport dereference
from cudf.api.types import (
is_decimal_dtype,
is_list_dtype,
is_list_like,
is_struct_dtype,
)
from cudf._lib.utils cimport data_from_unique_ptr
from cudf._lib.utils import _index_level_name, generate_pandas_metadata
from libc.stdint cimport uint8_t
from libcpp cimport bool
from libcpp.map cimport map
from libcpp.memory cimport make_unique, unique_ptr
from libcpp.string cimport string
from libcpp.unordered_map cimport unordered_map
from libcpp.utility cimport move
from libcpp.vector cimport vector
cimport cudf._lib.cpp.io.data_sink as cudf_io_data_sink
cimport cudf._lib.cpp.io.types as cudf_io_types
cimport cudf._lib.cpp.types as cudf_types
from cudf._lib.column cimport Column
from cudf._lib.cpp.io.parquet cimport (
chunked_parquet_writer_options,
merge_row_group_metadata as parquet_merge_metadata,
parquet_chunked_writer as cpp_parquet_chunked_writer,
parquet_reader_options,
parquet_writer_options,
read_parquet as parquet_reader,
write_parquet as parquet_writer,
)
from cudf._lib.cpp.io.types cimport column_in_metadata, table_input_metadata
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.cpp.types cimport data_type, size_type
from cudf._lib.io.datasource cimport NativeFileDatasource
from cudf._lib.io.utils cimport (
make_sinks_info,
make_source_info,
update_struct_field_names,
)
from cudf._lib.utils cimport table_view_from_table
from pyarrow.lib import NativeFile
from cudf.utils.ioutils import _ROW_GROUP_SIZE_BYTES_DEFAULT
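# BufferArrayFromVector is a zero-copy wrapper that exposes a C++
# vector[uint8_t] (e.g. the serialized file metadata returned by the parquet
# writer) to Python through the buffer protocol, so callers can view it with
# np.asarray without copying.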
cdef class BufferArrayFromVector:
cdef Py_ssize_t length
cdef unique_ptr[vector[uint8_t]] in_vec
# these two things declare part of the buffer interface
cdef Py_ssize_t shape[1]
cdef Py_ssize_t strides[1]
@staticmethod
cdef BufferArrayFromVector from_unique_ptr(
unique_ptr[vector[uint8_t]] in_vec
):
cdef BufferArrayFromVector buf = BufferArrayFromVector()
buf.in_vec = move(in_vec)
buf.length = dereference(buf.in_vec).size()
return buf
def __getbuffer__(self, Py_buffer *buffer, int flags):
cdef Py_ssize_t itemsize = sizeof(uint8_t)
self.shape[0] = self.length
self.strides[0] = 1
buffer.buf = dereference(self.in_vec).data()
buffer.format = NULL # byte
buffer.internal = NULL
buffer.itemsize = itemsize
buffer.len = self.length * itemsize # product(shape) * itemsize
buffer.ndim = 1
buffer.obj = self
buffer.readonly = 0
buffer.shape = self.shape
buffer.strides = self.strides
buffer.suboffsets = NULL
def __releasebuffer__(self, Py_buffer *buffer):
pass
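# Inspect the pandas metadata of a parquet file: report whether its index was
# written as a RangeIndex and return the raw 'index_columns' entries.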
def _parse_metadata(meta):
file_is_range_index = False
file_index_cols = None
if 'index_columns' in meta and len(meta['index_columns']) > 0:
file_index_cols = meta['index_columns']
if isinstance(file_index_cols[0], dict) and \
file_index_cols[0]['kind'] == 'range':
file_is_range_index = True
return file_is_range_index, file_index_cols
cpdef read_parquet(filepaths_or_buffers, columns=None, row_groups=None,
use_pandas_metadata=True):
"""
Cython function to call into libcudf API, see `read_parquet`.
See Also
--------
cudf.io.parquet.read_parquet
cudf.io.parquet.to_parquet
"""
# Convert NativeFile buffers to NativeFileDatasource,
# but save original buffers in case we need to use
# pyarrow for metadata processing
# (See: https://github.com/rapidsai/cudf/issues/9599)
pa_buffers = []
for i, datasource in enumerate(filepaths_or_buffers):
if isinstance(datasource, NativeFile):
pa_buffers.append(datasource)
filepaths_or_buffers[i] = NativeFileDatasource(datasource)
cdef cudf_io_types.source_info source = make_source_info(
filepaths_or_buffers)
cdef bool cpp_use_pandas_metadata = use_pandas_metadata
cdef vector[vector[size_type]] cpp_row_groups
cdef data_type cpp_timestamp_type = cudf_types.data_type(
cudf_types.type_id.EMPTY
)
if row_groups is not None:
cpp_row_groups = row_groups
cdef parquet_reader_options args
# Setup parquet reader arguments
args = move(
parquet_reader_options.builder(source)
.row_groups(cpp_row_groups)
.use_pandas_metadata(cpp_use_pandas_metadata)
.timestamp_type(cpp_timestamp_type)
.build()
)
cdef vector[string] cpp_columns
allow_range_index = True
if columns is not None:
cpp_columns.reserve(len(columns))
allow_range_index = len(columns) > 0
for col in columns:
cpp_columns.push_back(str(col).encode())
args.set_columns(cpp_columns)
# Read Parquet
cdef cudf_io_types.table_with_metadata c_result
with nogil:
c_result = move(parquet_reader(args))
names = [info.name.decode() for info in c_result.metadata.schema_info]
# Access the Parquet per_file_user_data to find the index
index_col = None
cdef vector[unordered_map[string, string]] per_file_user_data = \
c_result.metadata.per_file_user_data
index_col_names = None
is_range_index = True
for single_file in per_file_user_data:
json_str = single_file[b'pandas'].decode('utf-8')
meta = None
if json_str != "":
meta = json.loads(json_str)
file_is_range_index, index_col = _parse_metadata(meta)
is_range_index &= file_is_range_index
if not file_is_range_index and index_col is not None \
and index_col_names is None:
index_col_names = {}
for idx_col in index_col:
for c in meta['columns']:
if c['field_name'] == idx_col:
index_col_names[idx_col] = c['name']
df = cudf.DataFrame._from_data(*data_from_unique_ptr(
move(c_result.tbl),
column_names=names
))
update_struct_field_names(df, c_result.metadata.schema_info)
if meta is not None:
        # Keep each column's metadata keyed by name, since the order of
        # `meta["columns"]` and `column_names` is not guaranteed to match.
meta_data_per_column = {
col_meta['name']: col_meta for col_meta in meta["columns"]
}
# update the decimal precision of each column
for col in names:
if is_decimal_dtype(df._data[col].dtype):
df._data[col].dtype.precision = (
meta_data_per_column[col]["metadata"]["precision"]
)
# Set the index column
if index_col is not None and len(index_col) > 0:
if is_range_index:
if not allow_range_index:
return df
if len(per_file_user_data) > 1:
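                # Multiple files were read, so the per-file range metadata
                # cannot be stitched back together; fall back to a default
                # RangeIndex covering the concatenated result.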
range_index_meta = {
"kind": "range",
"name": None,
"start": 0,
"stop": len(df),
"step": 1
}
else:
range_index_meta = index_col[0]
if row_groups is not None:
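                # Only selected row groups were read: rebuild the index by
                # concatenating the row spans of the requested row groups,
                # computed from each file's footer.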
per_file_metadata = [
pa.parquet.read_metadata(
# Pyarrow cannot read directly from bytes
io.BytesIO(s) if isinstance(s, bytes) else s
) for s in (
pa_buffers or filepaths_or_buffers
)
]
filtered_idx = []
for i, file_meta in enumerate(per_file_metadata):
row_groups_i = []
start = 0
for row_group in range(file_meta.num_row_groups):
stop = start + file_meta.row_group(row_group).num_rows
row_groups_i.append((start, stop))
start = stop
for rg in row_groups[i]:
filtered_idx.append(
cudf.RangeIndex(
start=row_groups_i[rg][0],
stop=row_groups_i[rg][1],
step=range_index_meta['step']
)
)
if len(filtered_idx) > 0:
idx = cudf.concat(filtered_idx)
else:
idx = cudf.Index(cudf.core.column.column_empty(0))
else:
idx = cudf.RangeIndex(
start=range_index_meta['start'],
stop=range_index_meta['stop'],
step=range_index_meta['step'],
name=range_index_meta['name']
)
df._index = idx
elif set(index_col).issubset(names):
index_data = df[index_col]
actual_index_names = list(index_col_names.values())
if len(index_data._data) == 1:
idx = cudf.Index(
index_data._data.columns[0],
name=actual_index_names[0]
)
else:
idx = cudf.MultiIndex.from_frame(
index_data,
names=actual_index_names
)
df.drop(columns=index_col, inplace=True)
df._index = idx
else:
if use_pandas_metadata:
df.index.names = index_col
return df
@acquire_spill_lock()
def write_parquet(
table,
object filepaths_or_buffers,
object index=None,
object compression="snappy",
object statistics="ROWGROUP",
object metadata_file_path=None,
object int96_timestamps=False,
object row_group_size_bytes=_ROW_GROUP_SIZE_BYTES_DEFAULT,
object row_group_size_rows=None,
object max_page_size_bytes=None,
object max_page_size_rows=None,
object partitions_info=None,
object force_nullable_schema=False,
header_version="1.0",
use_dictionary=True,
):
"""
Cython function to call into libcudf API, see `write_parquet`.
See Also
--------
cudf.io.parquet.write_parquet
"""
# Create the write options
cdef table_input_metadata tbl_meta
cdef vector[map[string, string]] user_data
cdef table_view tv
cdef vector[unique_ptr[cudf_io_data_sink.data_sink]] _data_sinks
cdef cudf_io_types.sink_info sink = make_sinks_info(
filepaths_or_buffers, _data_sinks
)
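    # Write the index as regular column(s) when explicitly requested, or by
    # default whenever it is not a plain RangeIndex (a RangeIndex is typically
    # recorded in the pandas metadata instead of being materialized).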
if index is True or (
index is None and not isinstance(table._index, cudf.RangeIndex)
):
tv = table_view_from_table(table)
tbl_meta = table_input_metadata(tv)
for level, idx_name in enumerate(table._index.names):
tbl_meta.column_metadata[level].set_name(
str.encode(
_index_level_name(idx_name, level, table._column_names)
)
)
num_index_cols_meta = len(table._index.names)
else:
tv = table_view_from_table(table, ignore_index=True)
tbl_meta = table_input_metadata(tv)
num_index_cols_meta = 0
for i, name in enumerate(table._column_names, num_index_cols_meta):
if not isinstance(name, str):
raise ValueError("parquet must have string column names")
tbl_meta.column_metadata[i].set_name(name.encode())
_set_col_metadata(
table[name]._column,
tbl_meta.column_metadata[i],
force_nullable_schema
)
cdef map[string, string] tmp_user_data
if partitions_info is not None:
for start_row, num_row in partitions_info:
partitioned_df = table.iloc[start_row: start_row + num_row].copy(
deep=False
)
pandas_metadata = generate_pandas_metadata(partitioned_df, index)
tmp_user_data[str.encode("pandas")] = str.encode(pandas_metadata)
user_data.push_back(tmp_user_data)
tmp_user_data.clear()
else:
pandas_metadata = generate_pandas_metadata(table, index)
tmp_user_data[str.encode("pandas")] = str.encode(pandas_metadata)
user_data.push_back(tmp_user_data)
if header_version not in ("1.0", "2.0"):
raise ValueError(
f"Invalid parquet header version: {header_version}. "
"Valid values are '1.0' and '2.0'"
)
dict_policy = (
cudf_io_types.dictionary_policy.ALWAYS
if use_dictionary
else cudf_io_types.dictionary_policy.NEVER
)
cdef cudf_io_types.compression_type comp_type = _get_comp_type(compression)
cdef cudf_io_types.statistics_freq stat_freq = _get_stat_freq(statistics)
cdef unique_ptr[vector[uint8_t]] out_metadata_c
cdef vector[string] c_column_chunks_file_paths
cdef bool _int96_timestamps = int96_timestamps
cdef vector[cudf_io_types.partition_info] partitions
# Perform write
cdef parquet_writer_options args = move(
parquet_writer_options.builder(sink, tv)
.metadata(tbl_meta)
.key_value_metadata(move(user_data))
.compression(comp_type)
.stats_level(stat_freq)
.int96_timestamps(_int96_timestamps)
.write_v2_headers(header_version == "2.0")
.dictionary_policy(dict_policy)
.utc_timestamps(False)
.build()
)
if partitions_info is not None:
partitions.reserve(len(partitions_info))
for part in partitions_info:
partitions.push_back(
cudf_io_types.partition_info(part[0], part[1])
)
args.set_partitions(move(partitions))
if metadata_file_path is not None:
if is_list_like(metadata_file_path):
for path in metadata_file_path:
c_column_chunks_file_paths.push_back(str.encode(path))
else:
c_column_chunks_file_paths.push_back(
str.encode(metadata_file_path)
)
args.set_column_chunks_file_paths(move(c_column_chunks_file_paths))
if row_group_size_bytes is not None:
args.set_row_group_size_bytes(row_group_size_bytes)
if row_group_size_rows is not None:
args.set_row_group_size_rows(row_group_size_rows)
if max_page_size_bytes is not None:
args.set_max_page_size_bytes(max_page_size_bytes)
if max_page_size_rows is not None:
args.set_max_page_size_rows(max_page_size_rows)
with nogil:
out_metadata_c = move(parquet_writer(args))
if metadata_file_path is not None:
out_metadata_py = BufferArrayFromVector.from_unique_ptr(
move(out_metadata_c)
)
return np.asarray(out_metadata_py)
else:
return None
cdef class ParquetWriter:
"""
ParquetWriter lets you incrementally write out a Parquet file from a series
    of cudf tables.
Parameters
----------
filepath_or_buffer : str, io.IOBase, os.PathLike, or list
File path or buffer to write to. The argument may also correspond
to a list of file paths or buffers.
index : bool or None, default None
If ``True``, include a dataframe's index(es) in the file output.
If ``False``, they will not be written to the file. If ``None``,
index(es) other than RangeIndex will be saved as columns.
compression : {'snappy', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
statistics : {'ROWGROUP', 'PAGE', 'COLUMN', 'NONE'}, default 'ROWGROUP'
Level at which column statistics should be included in file.
row_group_size_bytes: int, default 134217728
        Maximum size of each row group of the output.
By default, 134217728 (128MB) will be used.
row_group_size_rows: int, default 1000000
        Maximum number of rows in each row group of the output.
By default, 1000000 (10^6 rows) will be used.
max_page_size_bytes: int, default 524288
Maximum uncompressed size of each page of the output.
By default, 524288 (512KB) will be used.
max_page_size_rows: int, default 20000
Maximum number of rows of each page of the output.
By default, 20000 will be used.
See Also
--------
cudf.io.parquet.write_parquet
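    Examples
    --------
    A minimal usage sketch (assumes ``df`` is a cudf.DataFrame):
    >>> writer = ParquetWriter("output.parquet")
    >>> writer.write_table(df)
    >>> writer.close()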
"""
cdef bool initialized
cdef unique_ptr[cpp_parquet_chunked_writer] writer
cdef table_input_metadata tbl_meta
cdef cudf_io_types.sink_info sink
cdef vector[unique_ptr[cudf_io_data_sink.data_sink]] _data_sink
cdef cudf_io_types.statistics_freq stat_freq
cdef cudf_io_types.compression_type comp_type
cdef object index
cdef size_t row_group_size_bytes
cdef size_type row_group_size_rows
cdef size_t max_page_size_bytes
cdef size_type max_page_size_rows
def __cinit__(self, object filepath_or_buffer, object index=None,
object compression="snappy", str statistics="ROWGROUP",
int row_group_size_bytes=_ROW_GROUP_SIZE_BYTES_DEFAULT,
int row_group_size_rows=1000000,
int max_page_size_bytes=524288,
int max_page_size_rows=20000):
filepaths_or_buffers = (
list(filepath_or_buffer)
if is_list_like(filepath_or_buffer)
else [filepath_or_buffer]
)
self.sink = make_sinks_info(filepaths_or_buffers, self._data_sink)
self.stat_freq = _get_stat_freq(statistics)
self.comp_type = _get_comp_type(compression)
self.index = index
self.initialized = False
self.row_group_size_bytes = row_group_size_bytes
self.row_group_size_rows = row_group_size_rows
self.max_page_size_bytes = max_page_size_bytes
self.max_page_size_rows = max_page_size_rows
def write_table(self, table, object partitions_info=None):
""" Writes a single table to the file """
if not self.initialized:
self._initialize_chunked_state(
table,
num_partitions=len(partitions_info) if partitions_info else 1
)
cdef table_view tv
if self.index is not False and (
table._index.name is not None or
isinstance(table._index, cudf.core.multiindex.MultiIndex)):
tv = table_view_from_table(table)
else:
tv = table_view_from_table(table, ignore_index=True)
cdef vector[cudf_io_types.partition_info] partitions
if partitions_info is not None:
for part in partitions_info:
partitions.push_back(
cudf_io_types.partition_info(part[0], part[1])
)
with nogil:
self.writer.get()[0].write(tv, partitions)
def close(self, object metadata_file_path=None):
cdef unique_ptr[vector[uint8_t]] out_metadata_c
cdef vector[string] column_chunks_file_paths
if not self.initialized:
return None
# Update metadata-collection options
if metadata_file_path is not None:
if is_list_like(metadata_file_path):
for path in metadata_file_path:
column_chunks_file_paths.push_back(str.encode(path))
else:
column_chunks_file_paths.push_back(
str.encode(metadata_file_path)
)
with nogil:
out_metadata_c = move(
self.writer.get()[0].close(column_chunks_file_paths)
)
if metadata_file_path is not None:
out_metadata_py = BufferArrayFromVector.from_unique_ptr(
move(out_metadata_c)
)
return np.asarray(out_metadata_py)
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def _initialize_chunked_state(self, table, num_partitions=1):
""" Prepares all the values required to build the
chunked_parquet_writer_options and creates a writer"""
cdef table_view tv
# Set the table_metadata
num_index_cols_meta = 0
self.tbl_meta = table_input_metadata(
table_view_from_table(table, ignore_index=True))
if self.index is not False:
if isinstance(table._index, cudf.core.multiindex.MultiIndex):
tv = table_view_from_table(table)
self.tbl_meta = table_input_metadata(tv)
for level, idx_name in enumerate(table._index.names):
self.tbl_meta.column_metadata[level].set_name(
(str.encode(idx_name))
)
num_index_cols_meta = len(table._index.names)
else:
if table._index.name is not None:
tv = table_view_from_table(table)
self.tbl_meta = table_input_metadata(tv)
self.tbl_meta.column_metadata[0].set_name(
str.encode(table._index.name)
)
num_index_cols_meta = 1
for i, name in enumerate(table._column_names, num_index_cols_meta):
self.tbl_meta.column_metadata[i].set_name(name.encode())
_set_col_metadata(
table[name]._column,
self.tbl_meta.column_metadata[i],
)
index = (
False if isinstance(table._index, cudf.RangeIndex) else self.index
)
pandas_metadata = generate_pandas_metadata(table, index)
cdef map[string, string] tmp_user_data
tmp_user_data[str.encode("pandas")] = str.encode(pandas_metadata)
cdef vector[map[string, string]] user_data
user_data = vector[map[string, string]](num_partitions, tmp_user_data)
cdef chunked_parquet_writer_options args
with nogil:
args = move(
chunked_parquet_writer_options.builder(self.sink)
.metadata(self.tbl_meta)
.key_value_metadata(move(user_data))
.compression(self.comp_type)
.stats_level(self.stat_freq)
.row_group_size_bytes(self.row_group_size_bytes)
.row_group_size_rows(self.row_group_size_rows)
.max_page_size_bytes(self.max_page_size_bytes)
.max_page_size_rows(self.max_page_size_rows)
.build()
)
self.writer.reset(new cpp_parquet_chunked_writer(args))
self.initialized = True
cpdef merge_filemetadata(object filemetadata_list):
"""
Cython function to call into libcudf API, see `merge_row_group_metadata`.
See Also
--------
cudf.io.parquet.merge_row_group_metadata
"""
cdef vector[unique_ptr[vector[uint8_t]]] list_c
cdef vector[uint8_t] blob_c
cdef unique_ptr[vector[uint8_t]] output_c
for blob_py in filemetadata_list:
blob_c = blob_py
list_c.push_back(move(make_unique[vector[uint8_t]](blob_c)))
with nogil:
output_c = move(parquet_merge_metadata(list_c))
out_metadata_py = BufferArrayFromVector.from_unique_ptr(move(output_c))
return np.asarray(out_metadata_py)
cdef cudf_io_types.statistics_freq _get_stat_freq(object statistics):
statistics = str(statistics).upper()
if statistics == "NONE":
return cudf_io_types.statistics_freq.STATISTICS_NONE
elif statistics == "ROWGROUP":
return cudf_io_types.statistics_freq.STATISTICS_ROWGROUP
elif statistics == "PAGE":
return cudf_io_types.statistics_freq.STATISTICS_PAGE
elif statistics == "COLUMN":
return cudf_io_types.statistics_freq.STATISTICS_COLUMN
else:
raise ValueError("Unsupported `statistics_freq` type")
cdef cudf_io_types.compression_type _get_comp_type(object compression):
if compression is None:
return cudf_io_types.compression_type.NONE
elif compression == "snappy":
return cudf_io_types.compression_type.SNAPPY
elif compression == "ZSTD":
return cudf_io_types.compression_type.ZSTD
else:
raise ValueError("Unsupported `compression` type")
cdef _set_col_metadata(
Column col,
column_in_metadata& col_meta,
bool force_nullable_schema=False,
):
if force_nullable_schema:
# Only set nullability if `force_nullable_schema`
# is true.
col_meta.set_nullability(True)
if is_struct_dtype(col):
for i, (child_col, name) in enumerate(
zip(col.children, list(col.dtype.fields))
):
col_meta.child(i).set_name(name.encode())
_set_col_metadata(
child_col,
col_meta.child(i),
force_nullable_schema
)
elif is_list_dtype(col):
_set_col_metadata(
col.children[1],
col_meta.child(1),
force_nullable_schema
)
else:
if is_decimal_dtype(col):
col_meta.set_decimal_precision(col.dtype.precision)
return
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/sort.pyx
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from itertools import repeat
from cudf.core.buffer import acquire_spill_lock
from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport move, pair
from libcpp.vector cimport vector
from cudf._lib.column cimport Column
from cudf._lib.cpp.aggregation cimport (
rank_method,
underlying_type_t_rank_method,
)
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.search cimport lower_bound, upper_bound
from cudf._lib.cpp.sorting cimport (
is_sorted as cpp_is_sorted,
rank,
segmented_sort_by_key as cpp_segmented_sort_by_key,
sort as cpp_sort,
sort_by_key as cpp_sort_by_key,
sorted_order,
stable_segmented_sort_by_key as cpp_stable_segmented_sort_by_key,
stable_sort_by_key as cpp_stable_sort_by_key,
stable_sorted_order,
)
from cudf._lib.cpp.table.table cimport table
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.cpp.types cimport null_order, null_policy, order as cpp_order
from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns
@acquire_spill_lock()
def is_sorted(
list source_columns, object ascending=None, object null_position=None
):
"""
Checks whether the rows of a `table` are sorted in lexicographical order.
Parameters
----------
source_columns : list of columns
columns to be checked for sort order
    ascending : None or list-like of booleans
        Expected sort order for each column. If list-like, its length must
        equal len(source_columns). If None, all columns are expected to be
        in ascending order. False (0) - descending, True (1) - ascending.
    null_position : None or list-like of booleans
        Desired position of nulls relative to other elements. If list-like,
        its length must equal len(source_columns). If None, nulls are
        expected after other elements. False (0) - after, True (1) - before.
Returns
-------
returns : boolean
Returns True, if sorted as expected by ``ascending`` and
``null_position``, False otherwise.
"""
cdef vector[cpp_order] column_order
cdef vector[null_order] null_precedence
if ascending is None:
column_order = vector[cpp_order](
len(source_columns), cpp_order.ASCENDING
)
else:
if len(ascending) != len(source_columns):
raise ValueError(
f"Expected a list-like of length {len(source_columns)}, "
f"got length {len(ascending)} for `ascending`"
)
column_order = vector[cpp_order](
len(source_columns), cpp_order.DESCENDING
)
for idx, val in enumerate(ascending):
if val:
column_order[idx] = cpp_order.ASCENDING
if null_position is None:
null_precedence = vector[null_order](
len(source_columns), null_order.AFTER
)
else:
if len(null_position) != len(source_columns):
raise ValueError(
f"Expected a list-like of length {len(source_columns)}, "
f"got length {len(null_position)} for `null_position`"
)
null_precedence = vector[null_order](
len(source_columns), null_order.AFTER
)
for idx, val in enumerate(null_position):
if val:
null_precedence[idx] = null_order.BEFORE
cdef bool c_result
cdef table_view source_table_view = table_view_from_columns(source_columns)
with nogil:
c_result = cpp_is_sorted(
source_table_view,
column_order,
null_precedence
)
return c_result
cdef pair[vector[cpp_order], vector[null_order]] ordering(
column_order, null_precedence
):
"""
Construct order and null order vectors
Parameters
----------
column_order
Iterable of bool (True for ascending order, False for descending)
null_precedence
        Iterable of strings for null positions ("first" for start, "last" for end)
Both iterables must be the same length (not checked)
Returns
-------
pair of vectors (order, and null_order)
"""
cdef vector[cpp_order] c_column_order
cdef vector[null_order] c_null_precedence
for asc, null in zip(column_order, null_precedence):
c_column_order.push_back(
cpp_order.ASCENDING if asc else cpp_order.DESCENDING
)
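        # libcudf's null_order is relative to an ascending comparison (BEFORE
        # means nulls compare as the smallest values), so a request for nulls
        # "first" in the output maps to BEFORE on ascending columns but AFTER
        # on descending ones; the xor below collapses these four cases.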
if asc ^ (null == "first"):
c_null_precedence.push_back(null_order.AFTER)
elif asc ^ (null == "last"):
c_null_precedence.push_back(null_order.BEFORE)
else:
raise ValueError(f"Invalid null precedence {null}")
return pair[vector[cpp_order], vector[null_order]](
c_column_order, c_null_precedence
)
@acquire_spill_lock()
def order_by(
list columns_from_table,
object ascending,
str na_position,
*,
bool stable
):
"""
Get index to sort the table in ascending/descending order.
Parameters
----------
columns_from_table : list[Column]
Columns from the table which will be sorted
ascending : sequence[bool]
Sequence of boolean values which correspond to each column
in the table to be sorted signifying the order of each column
True - Ascending and False - Descending
na_position : str
Whether null values should show up at the "first" or "last"
        position of **all** sorted columns.
stable : bool
Should the sort be stable? (no default)
Returns
-------
Column of indices that sorts the table
"""
cdef table_view source_table_view = table_view_from_columns(
columns_from_table
)
cdef pair[vector[cpp_order], vector[null_order]] order = ordering(
ascending, repeat(na_position)
)
cdef unique_ptr[column] c_result
if stable:
with nogil:
c_result = move(stable_sorted_order(source_table_view,
order.first,
order.second))
else:
with nogil:
c_result = move(sorted_order(source_table_view,
order.first,
order.second))
return Column.from_unique_ptr(move(c_result))
@acquire_spill_lock()
def sort(
list values,
list column_order=None,
list null_precedence=None,
):
"""
Sort the table in ascending/descending order.
Parameters
----------
values : list[Column]
Columns of the table which will be sorted
column_order : list[bool], optional
        Sequence of boolean values which correspond to each column in
        values providing the sort order (default all True).
        With True <=> ascending; False <=> descending.
    null_precedence : list[str], optional
        Sequence of "first" or "last" values (default "first")
        indicating the position of null values when sorting.
"""
cdef table_view values_view = table_view_from_columns(values)
cdef unique_ptr[table] result
ncol = len(values)
cdef pair[vector[cpp_order], vector[null_order]] order = ordering(
column_order or repeat(True, ncol),
null_precedence or repeat("first", ncol),
)
with nogil:
result = move(
cpp_sort(
values_view,
order.first,
order.second,
)
)
return columns_from_unique_ptr(move(result))
@acquire_spill_lock()
def sort_by_key(
list values,
list keys,
object ascending,
object na_position,
*,
bool stable,
):
"""
Sort a table by given keys
Parameters
----------
values : list[Column]
Columns of the table which will be sorted
keys : list[Column]
Columns making up the sort key
ascending : list[bool]
Sequence of boolean values which correspond to each column
in the table to be sorted signifying the order of each column
True - Ascending and False - Descending
na_position : list[str]
Sequence of "first" or "last" values (default "first")
indicating the position of null values when sorting the keys.
stable : bool
Should the sort be stable? (no default)
Returns
-------
list[Column]
list of value columns sorted by keys
"""
cdef table_view value_view = table_view_from_columns(values)
cdef table_view key_view = table_view_from_columns(keys)
cdef pair[vector[cpp_order], vector[null_order]] order = ordering(
ascending, na_position
)
cdef unique_ptr[table] c_result
if stable:
with nogil:
c_result = move(cpp_stable_sort_by_key(value_view,
key_view,
order.first,
order.second))
else:
with nogil:
c_result = move(cpp_sort_by_key(value_view,
key_view,
order.first,
order.second))
return columns_from_unique_ptr(move(c_result))
@acquire_spill_lock()
def segmented_sort_by_key(
list values,
list keys,
Column segment_offsets,
list column_order=None,
list null_precedence=None,
*,
bool stable,
):
"""
Sort segments of a table by given keys
Parameters
----------
values : list[Column]
Columns of the table which will be sorted
keys : list[Column]
Columns making up the sort key
    segment_offsets : Column
Segment offsets
column_order : list[bool], optional
Sequence of boolean values which correspond to each column in
keys providing the sort order (default all True).
With True <=> ascending; False <=> descending.
null_precedence : list[str], optional
Sequence of "first" or "last" values (default "first")
indicating the position of null values when sorting the keys.
stable : bool
Should the sort be stable? (no default)
Returns
-------
list[Column]
list of value columns sorted by keys
"""
cdef table_view values_view = table_view_from_columns(values)
cdef table_view keys_view = table_view_from_columns(keys)
cdef column_view offsets_view = segment_offsets.view()
cdef unique_ptr[table] result
ncol = len(values)
cdef pair[vector[cpp_order], vector[null_order]] order = ordering(
column_order or repeat(True, ncol),
null_precedence or repeat("first", ncol),
)
if stable:
with nogil:
result = move(
cpp_stable_segmented_sort_by_key(
values_view,
keys_view,
offsets_view,
order.first,
order.second,
)
)
else:
with nogil:
result = move(
cpp_segmented_sort_by_key(
values_view,
keys_view,
offsets_view,
order.first,
order.second,
)
)
return columns_from_unique_ptr(move(result))
@acquire_spill_lock()
def digitize(list source_columns, list bins, bool right=False):
"""
    Return the indices of the bins to which each value in source_columns belongs.
Parameters
----------
source_columns : Input columns to be binned.
    bins : List of columns containing the bin edges
    right : Whether the intervals include the right or the left bin edge.
"""
cdef table_view bins_view = table_view_from_columns(bins)
cdef table_view source_table_view = table_view_from_columns(
source_columns
)
cdef vector[cpp_order] column_order = (
vector[cpp_order](
bins_view.num_columns(),
cpp_order.ASCENDING
)
)
cdef vector[null_order] null_precedence = (
vector[null_order](
bins_view.num_columns(),
null_order.BEFORE
)
)
cdef unique_ptr[column] c_result
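    # Mirror np.digitize: with right=True bin edges are right-inclusive, which
    # corresponds to a lower_bound (searchsorted side='left') lookup of the
    # values against the bins; otherwise use upper_bound (side='right').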
if right:
with nogil:
c_result = move(lower_bound(
bins_view,
source_table_view,
column_order,
null_precedence)
)
else:
with nogil:
c_result = move(upper_bound(
bins_view,
source_table_view,
column_order,
null_precedence)
)
return Column.from_unique_ptr(move(c_result))
@acquire_spill_lock()
def rank_columns(list source_columns, object method, str na_option,
bool ascending, bool pct
):
"""
Compute numerical data ranks (1 through n) of each column in the dataframe
"""
cdef rank_method c_rank_method = < rank_method > (
< underlying_type_t_rank_method > method
)
cdef cpp_order column_order = (
cpp_order.ASCENDING
if ascending
else cpp_order.DESCENDING
)
# ascending
# #top = na_is_smallest
# #bottom = na_is_largest
# #keep = na_is_largest
# descending
# #top = na_is_largest
# #bottom = na_is_smallest
# #keep = na_is_smallest
cdef null_order null_precedence
if ascending:
if na_option == 'top':
null_precedence = null_order.BEFORE
else:
null_precedence = null_order.AFTER
else:
if na_option == 'top':
null_precedence = null_order.AFTER
else:
null_precedence = null_order.BEFORE
cdef null_policy c_null_handling = (
null_policy.EXCLUDE
if na_option == 'keep'
else null_policy.INCLUDE
)
cdef bool percentage = pct
cdef vector[unique_ptr[column]] c_results
cdef column_view c_view
cdef Column col
for col in source_columns:
c_view = col.view()
with nogil:
c_results.push_back(move(
rank(
c_view,
c_rank_method,
column_order,
c_null_handling,
null_precedence,
percentage
)
))
return [Column.from_unique_ptr(
move(c_results[i])
) for i in range(c_results.size())]
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/text.pyx
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
from io import TextIOBase
from cython.operator cimport dereference
from libc.stdint cimport uint64_t
from libcpp.memory cimport unique_ptr
from libcpp.string cimport string
from libcpp.utility cimport move
from cudf._lib.column cimport Column
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.io.text cimport (
byte_range_info,
data_chunk_source,
make_source,
make_source_from_bgzip_file,
make_source_from_file,
multibyte_split,
parse_options,
)
def read_text(object filepaths_or_buffers,
object delimiter=None,
object byte_range=None,
object strip_delimiters=False,
object compression=None,
object compression_offsets=None):
"""
Cython function to call into libcudf API, see `multibyte_split`.
See Also
--------
cudf.io.text.read_text
"""
cdef string delim = delimiter.encode()
cdef unique_ptr[data_chunk_source] datasource
cdef unique_ptr[column] c_col
cdef size_t c_byte_range_offset
cdef size_t c_byte_range_size
cdef uint64_t c_compression_begin_offset
cdef uint64_t c_compression_end_offset
cdef parse_options c_options
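    # Build the data chunk source: in-memory text from a file-like object, a
    # plain file on disk, or a bgzip-compressed file (optionally restricted to
    # a begin/end offset range).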
if compression is None:
if isinstance(filepaths_or_buffers, TextIOBase):
datasource = move(make_source(
filepaths_or_buffers.read().encode()))
else:
datasource = move(make_source_from_file(
filepaths_or_buffers.encode()))
elif compression == "bgzip":
if isinstance(filepaths_or_buffers, TextIOBase):
raise ValueError("bgzip compression requires a file path")
if compression_offsets is not None:
if len(compression_offsets) != 2:
raise ValueError(
"compression offsets need to consist of two elements")
c_compression_begin_offset = compression_offsets[0]
c_compression_end_offset = compression_offsets[1]
datasource = move(make_source_from_bgzip_file(
filepaths_or_buffers.encode(),
c_compression_begin_offset,
c_compression_end_offset))
else:
datasource = move(make_source_from_bgzip_file(
filepaths_or_buffers.encode()))
else:
raise ValueError("Only bgzip compression is supported at the moment")
c_options = parse_options()
if byte_range is not None:
c_byte_range_offset = byte_range[0]
c_byte_range_size = byte_range[1]
c_options.byte_range = byte_range_info(
c_byte_range_offset,
c_byte_range_size)
c_options.strip_delimiters = strip_delimiters
with nogil:
c_col = move(multibyte_split(
dereference(datasource),
delim,
c_options))
return {None: Column.from_unique_ptr(move(c_col))}
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/csv.pyx
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from libcpp cimport bool
from libcpp.map cimport map
from libcpp.memory cimport unique_ptr
from libcpp.string cimport string
from libcpp.utility cimport move
from libcpp.vector cimport vector
cimport cudf._lib.cpp.types as libcudf_types
from cudf._lib.cpp.types cimport data_type
from cudf._lib.io.datasource cimport Datasource, NativeFileDatasource
from cudf._lib.types cimport dtype_to_data_type
import numpy as np
import pandas as pd
import cudf
from cudf.core.buffer import acquire_spill_lock
from cudf._lib.cpp.types cimport size_type
import errno
import os
from collections import abc
from enum import IntEnum
from io import BytesIO, StringIO
from libc.stdint cimport int32_t
from libcpp cimport bool
from cudf._lib.cpp.io.csv cimport (
csv_reader_options,
csv_writer_options,
read_csv as cpp_read_csv,
write_csv as cpp_write_csv,
)
from cudf._lib.cpp.io.data_sink cimport data_sink
from cudf._lib.cpp.io.types cimport (
compression_type,
quote_style,
sink_info,
source_info,
table_with_metadata,
)
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.io.utils cimport make_sink_info, make_source_info
from cudf._lib.utils cimport data_from_unique_ptr, table_view_from_table
from pyarrow.lib import NativeFile
from cudf.api.types import is_hashable
ctypedef int32_t underlying_type_t_compression
class Compression(IntEnum):
INFER = (
<underlying_type_t_compression> compression_type.AUTO
)
SNAPPY = (
<underlying_type_t_compression> compression_type.SNAPPY
)
GZIP = (
<underlying_type_t_compression> compression_type.GZIP
)
BZ2 = (
<underlying_type_t_compression> compression_type.BZIP2
)
BROTLI = (
<underlying_type_t_compression> compression_type.BROTLI
)
ZIP = (
<underlying_type_t_compression> compression_type.ZIP
)
XZ = (
<underlying_type_t_compression> compression_type.XZ
)
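# Special "hex" dtype aliases accepted by read_csv map to the integer dtypes
# used to store the parsed values; the affected columns are additionally
# flagged for hexadecimal parsing via set_parse_hex below.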
CSV_HEX_TYPE_MAP = {
"hex": np.dtype("int64"),
"hex64": np.dtype("int64"),
"hex32": np.dtype("int32")
}
cdef csv_reader_options make_csv_reader_options(
object datasource,
object lineterminator,
object quotechar,
int quoting,
bool doublequote,
object header,
bool mangle_dupe_cols,
object usecols,
object delimiter,
bool delim_whitespace,
bool skipinitialspace,
object names,
object dtype,
int skipfooter,
int skiprows,
bool dayfirst,
object compression,
object thousands,
object decimal,
object true_values,
object false_values,
object nrows,
object byte_range,
bool skip_blank_lines,
object parse_dates,
object comment,
object na_values,
bool keep_default_na,
bool na_filter,
object prefix,
object index_col,
) except *:
cdef source_info c_source_info = make_source_info([datasource])
cdef compression_type c_compression
cdef vector[string] c_names
cdef size_t c_byte_range_offset = (
byte_range[0] if byte_range is not None else 0
)
cdef size_t c_byte_range_size = (
byte_range[1] if byte_range is not None else 0
)
cdef vector[int] c_use_cols_indexes
cdef vector[string] c_use_cols_names
cdef size_type c_nrows = nrows if nrows is not None else -1
cdef quote_style c_quoting
cdef vector[string] c_parse_dates_names
cdef vector[int] c_parse_dates_indexes
cdef vector[string] c_hex_col_names
cdef vector[data_type] c_dtypes_list
cdef map[string, data_type] c_dtypes_map
cdef vector[int] c_hex_col_indexes
cdef vector[string] c_true_values
cdef vector[string] c_false_values
cdef vector[string] c_na_values
# Reader settings
if compression is None:
c_compression = compression_type.NONE
else:
compression = str(compression)
compression = Compression[compression.upper()]
c_compression = <compression_type> (
<underlying_type_t_compression> compression
)
if quoting == 1:
c_quoting = quote_style.QUOTE_ALL
elif quoting == 2:
c_quoting = quote_style.QUOTE_NONNUMERIC
elif quoting == 3:
c_quoting = quote_style.QUOTE_NONE
else:
# Default value
c_quoting = quote_style.QUOTE_MINIMAL
cdef csv_reader_options csv_reader_options_c = move(
csv_reader_options.builder(c_source_info)
.compression(c_compression)
.mangle_dupe_cols(mangle_dupe_cols)
.byte_range_offset(c_byte_range_offset)
.byte_range_size(c_byte_range_size)
.nrows(c_nrows)
.skiprows(skiprows)
.skipfooter(skipfooter)
.quoting(c_quoting)
.lineterminator(ord(lineterminator))
.quotechar(ord(quotechar))
.decimal(ord(decimal))
.delim_whitespace(delim_whitespace)
.skipinitialspace(skipinitialspace)
.skip_blank_lines(skip_blank_lines)
.doublequote(doublequote)
.keep_default_na(keep_default_na)
.na_filter(na_filter)
.dayfirst(dayfirst)
.build()
)
if names is not None:
        # Column names were provided explicitly, so unless a header row is
        # also specified, assume the file has no header.
if header is None or header == 'infer':
csv_reader_options_c.set_header(-1)
else:
csv_reader_options_c.set_header(header)
c_names.reserve(len(names))
for name in names:
c_names.push_back(str(name).encode())
csv_reader_options_c.set_names(c_names)
else:
if header is None:
csv_reader_options_c.set_header(-1)
elif header == 'infer':
csv_reader_options_c.set_header(0)
else:
csv_reader_options_c.set_header(header)
if prefix is not None:
csv_reader_options_c.set_prefix(prefix.encode())
if usecols is not None:
all_int = all(isinstance(col, int) for col in usecols)
if all_int:
c_use_cols_indexes.reserve(len(usecols))
c_use_cols_indexes = usecols
csv_reader_options_c.set_use_cols_indexes(c_use_cols_indexes)
else:
c_use_cols_names.reserve(len(usecols))
for col_name in usecols:
c_use_cols_names.push_back(
str(col_name).encode()
)
csv_reader_options_c.set_use_cols_names(c_use_cols_names)
if delimiter is not None:
csv_reader_options_c.set_delimiter(ord(delimiter))
if thousands is not None:
csv_reader_options_c.set_thousands(ord(thousands))
if comment is not None:
csv_reader_options_c.set_comment(ord(comment))
if parse_dates is not None:
if isinstance(parse_dates, abc.Mapping):
raise NotImplementedError(
"`parse_dates`: dictionaries are unsupported")
if not isinstance(parse_dates, abc.Iterable):
raise NotImplementedError(
"`parse_dates`: an iterable is required")
for col in parse_dates:
if isinstance(col, str):
c_parse_dates_names.push_back(str(col).encode())
elif isinstance(col, int):
c_parse_dates_indexes.push_back(col)
else:
raise NotImplementedError(
"`parse_dates`: Nesting is unsupported")
csv_reader_options_c.set_parse_dates(c_parse_dates_names)
csv_reader_options_c.set_parse_dates(c_parse_dates_indexes)
if dtype is not None:
if isinstance(dtype, abc.Mapping):
for k, v in dtype.items():
col_type = v
if is_hashable(v) and v in CSV_HEX_TYPE_MAP:
col_type = CSV_HEX_TYPE_MAP[v]
c_hex_col_names.push_back(str(k).encode())
c_dtypes_map[str(k).encode()] = \
_get_cudf_data_type_from_dtype(
cudf.dtype(col_type))
csv_reader_options_c.set_dtypes(c_dtypes_map)
csv_reader_options_c.set_parse_hex(c_hex_col_names)
elif (
cudf.api.types.is_scalar(dtype) or
isinstance(dtype, (
np.dtype, pd.api.extensions.ExtensionDtype, type
))
):
c_dtypes_list.reserve(1)
if is_hashable(dtype) and dtype in CSV_HEX_TYPE_MAP:
dtype = CSV_HEX_TYPE_MAP[dtype]
c_hex_col_indexes.push_back(0)
c_dtypes_list.push_back(
_get_cudf_data_type_from_dtype(dtype)
)
csv_reader_options_c.set_dtypes(c_dtypes_list)
csv_reader_options_c.set_parse_hex(c_hex_col_indexes)
elif isinstance(dtype, abc.Collection):
c_dtypes_list.reserve(len(dtype))
for index, col_dtype in enumerate(dtype):
if is_hashable(col_dtype) and col_dtype in CSV_HEX_TYPE_MAP:
col_dtype = CSV_HEX_TYPE_MAP[col_dtype]
c_hex_col_indexes.push_back(index)
c_dtypes_list.push_back(
_get_cudf_data_type_from_dtype(col_dtype)
)
csv_reader_options_c.set_dtypes(c_dtypes_list)
csv_reader_options_c.set_parse_hex(c_hex_col_indexes)
else:
raise ValueError(
"dtype should be a scalar/str/list-like/dict-like"
)
if true_values is not None:
c_true_values.reserve(len(true_values))
for tv in true_values:
c_true_values.push_back(tv.encode())
csv_reader_options_c.set_true_values(c_true_values)
if false_values is not None:
c_false_values.reserve(len(false_values))
for fv in false_values:
c_false_values.push_back(fv.encode())
csv_reader_options_c.set_false_values(c_false_values)
if na_values is not None:
c_na_values.reserve(len(na_values))
for nv in na_values:
c_na_values.push_back(nv.encode())
csv_reader_options_c.set_na_values(c_na_values)
return csv_reader_options_c
def validate_args(
object delimiter,
object sep,
bool delim_whitespace,
object decimal,
object thousands,
object nrows,
int skipfooter,
object byte_range,
int skiprows
):
if delim_whitespace:
if delimiter is not None:
raise ValueError("cannot set both delimiter and delim_whitespace")
if sep != ',':
raise ValueError("cannot set both sep and delim_whitespace")
# Alias sep -> delimiter.
actual_delimiter = delimiter if delimiter else sep
if decimal == actual_delimiter:
raise ValueError("decimal cannot be the same as delimiter")
if thousands == actual_delimiter:
raise ValueError("thousands cannot be the same as delimiter")
if nrows is not None and skipfooter != 0:
raise ValueError("cannot use both nrows and skipfooter parameters")
if byte_range is not None:
if skipfooter != 0 or skiprows != 0 or nrows is not None:
raise ValueError("""cannot manually limit rows to be read when
using the byte range parameter""")
def read_csv(
object datasource,
object lineterminator="\n",
object quotechar='"',
int quoting=0,
bool doublequote=True,
object header="infer",
bool mangle_dupe_cols=True,
object usecols=None,
object sep=",",
object delimiter=None,
bool delim_whitespace=False,
bool skipinitialspace=False,
object names=None,
object dtype=None,
int skipfooter=0,
int skiprows=0,
bool dayfirst=False,
object compression="infer",
object thousands=None,
object decimal=".",
object true_values=None,
object false_values=None,
object nrows=None,
object byte_range=None,
bool skip_blank_lines=True,
object parse_dates=None,
object comment=None,
object na_values=None,
bool keep_default_na=True,
bool na_filter=True,
object prefix=None,
object index_col=None,
**kwargs,
):
"""
Cython function to call into libcudf API, see `read_csv`.
See Also
--------
cudf.read_csv
"""
if not isinstance(datasource, (BytesIO, StringIO, bytes,
Datasource,
NativeFile)):
if not os.path.isfile(datasource):
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), datasource
)
if isinstance(datasource, StringIO):
datasource = datasource.read().encode()
elif isinstance(datasource, str) and not os.path.isfile(datasource):
datasource = datasource.encode()
elif isinstance(datasource, NativeFile):
datasource = NativeFileDatasource(datasource)
validate_args(delimiter, sep, delim_whitespace, decimal, thousands,
nrows, skipfooter, byte_range, skiprows)
# Alias sep -> delimiter.
if delimiter is None:
delimiter = sep
cdef csv_reader_options read_csv_options_c = make_csv_reader_options(
datasource, lineterminator, quotechar, quoting, doublequote,
header, mangle_dupe_cols, usecols, delimiter, delim_whitespace,
skipinitialspace, names, dtype, skipfooter, skiprows, dayfirst,
compression, thousands, decimal, true_values, false_values, nrows,
byte_range, skip_blank_lines, parse_dates, comment, na_values,
keep_default_na, na_filter, prefix, index_col)
cdef table_with_metadata c_result
with nogil:
c_result = move(cpp_read_csv(read_csv_options_c))
meta_names = [info.name.decode() for info in c_result.metadata.schema_info]
df = cudf.DataFrame._from_data(*data_from_unique_ptr(
move(c_result.tbl),
column_names=meta_names
))
if dtype is not None:
if isinstance(dtype, abc.Mapping):
for k, v in dtype.items():
if cudf.api.types.is_categorical_dtype(v):
df._data[str(k)] = df._data[str(k)].astype(v)
elif (
cudf.api.types.is_scalar(dtype) or
isinstance(dtype, (
np.dtype, pd.api.extensions.ExtensionDtype, type
))
):
if cudf.api.types.is_categorical_dtype(dtype):
df = df.astype(dtype)
elif isinstance(dtype, abc.Collection):
for index, col_dtype in enumerate(dtype):
if cudf.api.types.is_categorical_dtype(col_dtype):
col_name = df._data.names[index]
df._data[col_name] = df._data[col_name].astype(col_dtype)
if names is not None and isinstance(names[0], (int)):
df.columns = [int(x) for x in df._data]
# Set index if the index_col parameter is passed
if index_col is not None and index_col is not False:
if isinstance(index_col, int):
index_col_name = df._data.select_by_index(index_col).names[0]
df = df.set_index(index_col_name)
if isinstance(index_col_name, str) and \
names is None and header in ("infer",):
if index_col_name.startswith("Unnamed:"):
                    # TODO: Try to upstream this to the libcudf
                    # csv reader in the future
df._index.name = None
elif names is None:
df._index.name = index_col
else:
df = df.set_index(index_col)
return df
@acquire_spill_lock()
def write_csv(
table,
object path_or_buf=None,
object sep=",",
object na_rep="",
bool header=True,
object lineterminator="\n",
int rows_per_chunk=8,
bool index=True,
):
"""
Cython function to call into libcudf API, see `write_csv`.
See Also
--------
cudf.to_csv
"""
cdef table_view input_table_view = table_view_from_table(
table, not index
)
cdef bool include_header_c = header
cdef char delim_c = ord(sep)
cdef string line_term_c = lineterminator.encode()
cdef string na_c = na_rep.encode()
cdef int rows_per_chunk_c = rows_per_chunk
cdef vector[string] col_names
cdef string true_value_c = 'True'.encode()
cdef string false_value_c = 'False'.encode()
cdef unique_ptr[data_sink] data_sink_c
cdef sink_info sink_info_c = make_sink_info(path_or_buf, data_sink_c)
if header is True:
all_names = columns_apply_na_rep(table._column_names, na_rep)
if index is True:
all_names = table._index.names + all_names
if len(all_names) > 0:
col_names.reserve(len(all_names))
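            # A single unnamed column is written as an explicitly quoted empty
            # name so the header row still contains a field rather than being
            # an empty line.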
if len(all_names) == 1:
if all_names[0] in (None, ''):
col_names.push_back('""'.encode())
else:
col_names.push_back(
str(all_names[0]).encode()
)
else:
for idx, col_name in enumerate(all_names):
if col_name is None:
col_names.push_back(''.encode())
else:
col_names.push_back(
str(col_name).encode()
)
cdef csv_writer_options options = move(
csv_writer_options.builder(sink_info_c, input_table_view)
.names(col_names)
.na_rep(na_c)
.include_header(include_header_c)
.rows_per_chunk(rows_per_chunk_c)
.line_terminator(line_term_c)
.inter_column_delimiter(delim_c)
.true_value(true_value_c)
.false_value(false_value_c)
.build()
)
try:
with nogil:
cpp_write_csv(options)
except OverflowError:
raise OverflowError(
f"Writing CSV file with chunksize={rows_per_chunk} failed. "
"Consider providing a smaller chunksize argument."
)
cdef data_type _get_cudf_data_type_from_dtype(object dtype) except *:
    # TODO: Remove this work-around once dictionary types
    # in libcudf are fully mapped to categorical columns:
# https://github.com/rapidsai/cudf/issues/3960
if cudf.api.types.is_categorical_dtype(dtype):
if isinstance(dtype, str):
dtype = "str"
else:
dtype = dtype.categories.dtype
if isinstance(dtype, str):
if str(dtype) == "date32":
return libcudf_types.data_type(
libcudf_types.type_id.TIMESTAMP_DAYS
)
elif str(dtype) in ("date", "date64"):
return libcudf_types.data_type(
libcudf_types.type_id.TIMESTAMP_MILLISECONDS
)
elif str(dtype) == "timestamp":
return libcudf_types.data_type(
libcudf_types.type_id.TIMESTAMP_MILLISECONDS
)
elif str(dtype) == "timestamp[us]":
return libcudf_types.data_type(
libcudf_types.type_id.TIMESTAMP_MICROSECONDS
)
elif str(dtype) == "timestamp[s]":
return libcudf_types.data_type(
libcudf_types.type_id.TIMESTAMP_SECONDS
)
elif str(dtype) == "timestamp[ms]":
return libcudf_types.data_type(
libcudf_types.type_id.TIMESTAMP_MILLISECONDS
)
elif str(dtype) == "timestamp[ns]":
return libcudf_types.data_type(
libcudf_types.type_id.TIMESTAMP_NANOSECONDS
)
dtype = cudf.dtype(dtype)
return dtype_to_data_type(dtype)
def columns_apply_na_rep(column_names, na_rep):
return tuple(
na_rep if pd.isnull(col_name)
else col_name
for col_name in column_names
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/utils.pxd
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from libcpp.memory cimport unique_ptr
from libcpp.string cimport string
from libcpp.vector cimport vector
from cudf._lib.cpp.column.column cimport column_view
from cudf._lib.cpp.table.table cimport table, table_view
cdef vector[column_view] make_column_views(object columns) except*
cdef vector[string] get_column_names(object table, object index) except*
cdef data_from_unique_ptr(
unique_ptr[table] c_tbl, column_names, index_names=*)
cdef data_from_table_view(
table_view tv, object owner, object column_names, object index_names=*)
cdef table_view table_view_from_columns(columns) except *
cdef table_view table_view_from_table(tbl, ignore_index=*) except*
cdef columns_from_unique_ptr(unique_ptr[table] c_tbl)
cdef columns_from_table_view(table_view tv, object owners)
cdef columns_from_pylibcudf_table(tbl)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/aggregation.pxd
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
from libcpp.memory cimport unique_ptr
from cudf._lib.cpp.aggregation cimport (
groupby_aggregation,
groupby_scan_aggregation,
reduce_aggregation,
rolling_aggregation,
scan_aggregation,
)
cdef class RollingAggregation:
cdef unique_ptr[rolling_aggregation] c_obj
cdef class GroupbyAggregation:
cdef unique_ptr[groupby_aggregation] c_obj
cdef class GroupbyScanAggregation:
cdef unique_ptr[groupby_scan_aggregation] c_obj
cdef class ReduceAggregation:
cdef unique_ptr[reduce_aggregation] c_obj
cdef class ScanAggregation:
cdef unique_ptr[scan_aggregation] c_obj
cdef RollingAggregation make_rolling_aggregation(op, kwargs=*)
cdef GroupbyAggregation make_groupby_aggregation(op, kwargs=*)
cdef GroupbyScanAggregation make_groupby_scan_aggregation(op, kwargs=*)
cdef ReduceAggregation make_reduce_aggregation(op, kwargs=*)
cdef ScanAggregation make_scan_aggregation(op, kwargs=*)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/groupby.pyx
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from pandas.core.groupby.groupby import DataError
from cudf.api.types import (
is_categorical_dtype,
is_decimal_dtype,
is_interval_dtype,
is_list_dtype,
is_string_dtype,
is_struct_dtype,
)
from cudf.core.buffer import acquire_spill_lock
from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from libcpp.pair cimport pair
from libcpp.utility cimport move
from libcpp.vector cimport vector
from cudf._lib.column cimport Column
from cudf._lib.scalar cimport DeviceScalar
from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns
from cudf._lib.scalar import as_device_scalar
from libcpp.functional cimport reference_wrapper
cimport cudf._lib.cpp.groupby as libcudf_groupby
cimport cudf._lib.cpp.types as libcudf_types
from cudf._lib.aggregation cimport (
GroupbyAggregation,
GroupbyScanAggregation,
make_groupby_aggregation,
make_groupby_scan_aggregation,
)
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.replace cimport replace_policy
from cudf._lib.cpp.scalar.scalar cimport scalar
from cudf._lib.cpp.table.table cimport table, table_view
from cudf._lib.cpp.types cimport size_type
# The sets below define the possible aggregations that can be performed on
# different dtypes. These strings must be elements of the AggregationKind enum.
# The libcudf infrastructure for "COLLECT" support on categoricals exists,
# but the corresponding dtype support in Python does not yet.
_CATEGORICAL_AGGS = {"COUNT", "NUNIQUE", "SIZE", "UNIQUE"}
_STRING_AGGS = {
"COLLECT",
"COUNT",
"MAX",
"MIN",
"NTH",
"NUNIQUE",
"SIZE",
"UNIQUE",
}
_LIST_AGGS = {"COLLECT"}
_STRUCT_AGGS = {"COLLECT", "CORRELATION", "COVARIANCE"}
_INTERVAL_AGGS = {"COLLECT"}
_DECIMAL_AGGS = {
"ARGMIN",
"ARGMAX",
"COLLECT",
"COUNT",
"MAX",
"MIN",
"NTH",
"NUNIQUE",
"SUM",
}
# workaround for https://github.com/cython/cython/issues/3885
ctypedef const scalar constscalar
cdef _agg_result_from_columns(
vector[libcudf_groupby.aggregation_result]& c_result_columns,
set column_included,
int n_input_columns
):
"""Construct the list of result columns from libcudf result. The result
contains the same number of lists as the number of input columns. Result
for an input column that has no applicable aggregations is an empty list.
"""
cdef:
int i
int j
int result_index = 0
vector[unique_ptr[column]]* c_result
result_columns = []
for i in range(n_input_columns):
if i in column_included:
c_result = &c_result_columns[result_index].results
result_columns.append([
Column.from_unique_ptr(move(c_result[0][j]))
for j in range(c_result[0].size())
])
result_index += 1
else:
result_columns.append([])
return result_columns
cdef class GroupBy:
cdef unique_ptr[libcudf_groupby.groupby] c_obj
cdef dict __dict__
def __cinit__(self, list keys, bool dropna=True, *args, **kwargs):
cdef libcudf_types.null_policy c_null_handling
cdef table_view keys_view
if dropna:
c_null_handling = libcudf_types.null_policy.EXCLUDE
else:
c_null_handling = libcudf_types.null_policy.INCLUDE
with acquire_spill_lock() as spill_lock:
keys_view = table_view_from_columns(keys)
# We spill lock the columns while this GroupBy instance is alive.
self._spill_lock = spill_lock
with nogil:
self.c_obj.reset(
new libcudf_groupby.groupby(
keys_view,
c_null_handling,
)
)
def __init__(self, list keys, bool dropna=True):
self.keys = keys
self.dropna = dropna
def groups(self, list values):
"""
Perform a sort groupby, using ``self.keys`` as the key columns
and ``values`` as the value columns.
Parameters
----------
values: list of Columns
The value columns
Returns
-------
grouped_keys: list of Columns
The grouped key columns
grouped_values: list of Columns
The grouped value columns
offsets: list of integers
Integer offsets such that offsets[i+1] - offsets[i]
represents the size of group `i`.
"""
cdef table_view values_view = table_view_from_columns(values)
with nogil:
c_groups = move(self.c_obj.get()[0].get_groups(values_view))
grouped_key_cols = columns_from_unique_ptr(move(c_groups.keys))
if values:
grouped_value_cols = columns_from_unique_ptr(move(c_groups.values))
else:
grouped_value_cols = []
return grouped_key_cols, grouped_value_cols, c_groups.offsets
def aggregate_internal(self, values, aggregations):
"""`values` is a list of columns and `aggregations` is a list of list
of aggregations. `aggregations[i]` is a list of aggregations for
`values[i]`. Returns a tuple containing 1) list of list of aggregation
results, 2) a list of grouped keys, and 3) a list of list of
aggregations performed.
"""
cdef vector[libcudf_groupby.aggregation_request] c_agg_requests
cdef libcudf_groupby.aggregation_request c_agg_request
cdef Column col
cdef GroupbyAggregation agg_obj
cdef pair[
unique_ptr[table],
vector[libcudf_groupby.aggregation_result]
] c_result
allow_empty = all(len(v) == 0 for v in aggregations)
included_aggregations = []
column_included = set()
for i, (col, aggs) in enumerate(zip(values, aggregations)):
dtype = col.dtype
valid_aggregations = (
_LIST_AGGS if is_list_dtype(dtype)
else _STRING_AGGS if is_string_dtype(dtype)
else _CATEGORICAL_AGGS if is_categorical_dtype(dtype)
else _STRUCT_AGGS if is_struct_dtype(dtype)
else _INTERVAL_AGGS if is_interval_dtype(dtype)
else _DECIMAL_AGGS if is_decimal_dtype(dtype)
else "ALL"
)
included_aggregations_i = []
c_agg_request = move(libcudf_groupby.aggregation_request())
for agg in aggs:
agg_obj = make_groupby_aggregation(agg)
if (valid_aggregations == "ALL"
or agg_obj.kind in valid_aggregations):
included_aggregations_i.append((agg, agg_obj.kind))
c_agg_request.aggregations.push_back(
move(agg_obj.c_obj)
)
included_aggregations.append(included_aggregations_i)
if not c_agg_request.aggregations.empty():
c_agg_request.values = col.view()
c_agg_requests.push_back(
move(c_agg_request)
)
column_included.add(i)
if c_agg_requests.empty() and not allow_empty:
raise DataError("All requested aggregations are unsupported.")
with nogil:
c_result = move(
self.c_obj.get()[0].aggregate(
c_agg_requests
)
)
grouped_keys = columns_from_unique_ptr(
move(c_result.first)
)
result_columns = _agg_result_from_columns(
c_result.second, column_included, len(values)
)
return result_columns, grouped_keys, included_aggregations
def scan_internal(self, values, aggregations):
"""`values` is a list of columns and `aggregations` is a list of list
of aggregations. `aggregations[i]` is a list of aggregations for
`values[i]`. Returns a tuple containing 1) list of list of aggregation
results, 2) a list of grouped keys, and 3) a list of list of
aggregations performed.
"""
cdef vector[libcudf_groupby.scan_request] c_agg_requests
cdef libcudf_groupby.scan_request c_agg_request
cdef Column col
cdef GroupbyScanAggregation agg_obj
cdef pair[
unique_ptr[table],
vector[libcudf_groupby.aggregation_result]
] c_result
allow_empty = all(len(v) == 0 for v in aggregations)
included_aggregations = []
column_included = set()
for i, (col, aggs) in enumerate(zip(values, aggregations)):
dtype = col.dtype
valid_aggregations = (
_LIST_AGGS if is_list_dtype(dtype)
else _STRING_AGGS if is_string_dtype(dtype)
else _CATEGORICAL_AGGS if is_categorical_dtype(dtype)
else _STRUCT_AGGS if is_struct_dtype(dtype)
else _INTERVAL_AGGS if is_interval_dtype(dtype)
else _DECIMAL_AGGS if is_decimal_dtype(dtype)
else "ALL"
)
included_aggregations_i = []
c_agg_request = move(libcudf_groupby.scan_request())
for agg in aggs:
agg_obj = make_groupby_scan_aggregation(agg)
if (valid_aggregations == "ALL"
or agg_obj.kind in valid_aggregations):
included_aggregations_i.append((agg, agg_obj.kind))
c_agg_request.aggregations.push_back(
move(agg_obj.c_obj)
)
included_aggregations.append(included_aggregations_i)
if not c_agg_request.aggregations.empty():
c_agg_request.values = col.view()
c_agg_requests.push_back(
move(c_agg_request)
)
column_included.add(i)
if c_agg_requests.empty() and not allow_empty:
raise DataError("All requested aggregations are unsupported.")
with nogil:
c_result = move(
self.c_obj.get()[0].scan(
c_agg_requests
)
)
grouped_keys = columns_from_unique_ptr(
move(c_result.first)
)
result_columns = _agg_result_from_columns(
c_result.second, column_included, len(values)
)
return result_columns, grouped_keys, included_aggregations
def aggregate(self, values, aggregations):
"""
Parameters
----------
values : list of Columns
    The columns to aggregate.
aggregations : list of lists
    ``aggregations[i]`` is the list of aggregations to perform on
    ``values[i]``. Each aggregation may be specified as:
    - a string (e.g., "max")
    - a lambda/function
Returns
-------
A tuple of (result columns, grouped key columns, applied aggregations),
as produced by ``aggregate_internal`` or ``scan_internal``.
"""
if _is_all_scan_aggregate(aggregations):
return self.scan_internal(values, aggregations)
return self.aggregate_internal(values, aggregations)
def shift(self, list values, int periods, list fill_values):
cdef table_view view = table_view_from_columns(values)
cdef size_type num_col = view.num_columns()
cdef vector[size_type] offsets = vector[size_type](num_col, periods)
cdef vector[reference_wrapper[constscalar]] c_fill_values
cdef DeviceScalar d_slr
d_slrs = []
c_fill_values.reserve(num_col)
for val, col in zip(fill_values, values):
d_slr = as_device_scalar(val, dtype=col.dtype)
d_slrs.append(d_slr)
c_fill_values.push_back(
reference_wrapper[constscalar](d_slr.get_raw_ptr()[0])
)
cdef pair[unique_ptr[table], unique_ptr[table]] c_result
with nogil:
c_result = move(
self.c_obj.get()[0].shift(view, offsets, c_fill_values)
)
grouped_keys = columns_from_unique_ptr(move(c_result.first))
shifted = columns_from_unique_ptr(move(c_result.second))
return shifted, grouped_keys
def replace_nulls(self, list values, object method):
cdef table_view val_view = table_view_from_columns(values)
cdef pair[unique_ptr[table], unique_ptr[table]] c_result
cdef replace_policy policy = (
replace_policy.PRECEDING
if method == 'ffill' else replace_policy.FOLLOWING
)
cdef vector[replace_policy] policies = vector[replace_policy](
val_view.num_columns(), policy
)
with nogil:
c_result = move(
self.c_obj.get()[0].replace_nulls(val_view, policies)
)
return columns_from_unique_ptr(move(c_result.second))
_GROUPBY_SCANS = {"cumcount", "cumsum", "cummin", "cummax", "rank"}
def _is_all_scan_aggregate(all_aggs):
"""
Return True if all of the provided aggregations are scan aggregations.
Raises
------
NotImplementedError
If both reduction aggregations and scan aggregations are present.
"""
def get_name(agg):
return agg.__name__ if callable(agg) else agg
all_scan = all(
get_name(agg_name) in _GROUPBY_SCANS for aggs in all_aggs
for agg_name in aggs
)
any_scan = any(
get_name(agg_name) in _GROUPBY_SCANS for aggs in all_aggs
for agg_name in aggs
)
if not all_scan and any_scan:
raise NotImplementedError(
"Cannot perform both aggregation and scan in one operation"
)
return all_scan and any_scan
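# Illustrative usage sketch (hypothetical columns; normally driven by
# cudf.core.groupby rather than called directly):
#
#     gb = GroupBy([key_col], dropna=True)
#     results, grouped_keys, included = gb.aggregate(
#         [val_col], [["sum", "max"]]
#     )
#
# Requests made up entirely of scans, e.g. [["cumsum"]], are routed to
# scan_internal by _is_all_scan_aggregate, while mixing scans with
# reductions such as [["cumsum", "sum"]] raises NotImplementedError.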
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/types.pxd
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from libc.stdint cimport int32_t
from libcpp cimport bool
cimport cudf._lib.cpp.types as libcudf_types
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.lists.lists_column_view cimport lists_column_view
ctypedef bool underlying_type_t_order
ctypedef bool underlying_type_t_null_order
ctypedef bool underlying_type_t_sorted
ctypedef int32_t underlying_type_t_interpolation
ctypedef int32_t underlying_type_t_type_id
ctypedef bool underlying_type_t_null_policy
cdef dtype_from_column_view(column_view cv)
cdef libcudf_types.data_type dtype_to_data_type(dtype) except *
cpdef dtype_to_pylibcudf_type(dtype)
cdef bool is_decimal_type_id(libcudf_types.type_id tid) except *
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/interop.pyx
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from cpython cimport pycapsule
from libcpp.memory cimport shared_ptr, unique_ptr
from libcpp.utility cimport move
from libcpp.vector cimport vector
from pyarrow.lib cimport CTable, pyarrow_unwrap_table, pyarrow_wrap_table
from cudf._lib.cpp.interop cimport (
DLManagedTensor,
column_metadata,
from_arrow as cpp_from_arrow,
from_dlpack as cpp_from_dlpack,
to_arrow as cpp_to_arrow,
to_dlpack as cpp_to_dlpack,
)
from cudf._lib.cpp.table.table cimport table
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns
from cudf.api.types import is_list_dtype, is_struct_dtype
from cudf.core.buffer import acquire_spill_lock
def from_dlpack(dlpack_capsule):
"""
Converts a DLPack Tensor PyCapsule into a list of columns.
DLPack Tensor PyCapsule is expected to have the name "dltensor".
"""
cdef DLManagedTensor* dlpack_tensor = <DLManagedTensor*>pycapsule.\
PyCapsule_GetPointer(dlpack_capsule, 'dltensor')
pycapsule.PyCapsule_SetName(dlpack_capsule, 'used_dltensor')
cdef unique_ptr[table] c_result
with nogil:
c_result = move(
cpp_from_dlpack(dlpack_tensor)
)
res = columns_from_unique_ptr(move(c_result))
dlpack_tensor.deleter(dlpack_tensor)
return res
def to_dlpack(list source_columns):
"""
Converts a list of columns into a DLPack Tensor PyCapsule.
DLPack Tensor PyCapsule will have the name "dltensor".
"""
if any(column.null_count for column in source_columns):
raise ValueError(
"Cannot create a DLPack tensor with null values. \
Input is required to have null count as zero."
)
cdef DLManagedTensor *dlpack_tensor
cdef table_view source_table_view = table_view_from_columns(source_columns)
with nogil:
dlpack_tensor = cpp_to_dlpack(
source_table_view
)
return pycapsule.PyCapsule_New(
dlpack_tensor,
'dltensor',
dlmanaged_tensor_pycapsule_deleter
)
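# Round-trip sketch (hypothetical columns): to_dlpack produces a PyCapsule
# named "dltensor" for consumption by other frameworks, and from_dlpack
# renames a consumed capsule to "used_dltensor" so its deleter is not run
# twice.
#
#     capsule = to_dlpack([col_a, col_b])   # columns must not contain nulls
#     cols = from_dlpack(capsule)           # back to a list of Columns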
cdef void dlmanaged_tensor_pycapsule_deleter(object pycap_obj) noexcept:
cdef DLManagedTensor* dlpack_tensor = <DLManagedTensor*>0
try:
dlpack_tensor = <DLManagedTensor*>pycapsule.PyCapsule_GetPointer(
pycap_obj, 'used_dltensor')
return # we do not call a used capsule's deleter
except Exception:
dlpack_tensor = <DLManagedTensor*>pycapsule.PyCapsule_GetPointer(
pycap_obj, 'dltensor')
dlpack_tensor.deleter(dlpack_tensor)
cdef vector[column_metadata] gather_metadata(object cols_dtypes) except *:
"""
Generate a vector of column_metadata, one entry per column.
Parameters
----------
cols_dtypes : iterable
An iterable of ``(column_name, dtype)`` pairs.
"""
cdef vector[column_metadata] cpp_metadata
cpp_metadata.reserve(len(cols_dtypes))
if cols_dtypes is not None:
for idx, (col_name, col_dtype) in enumerate(cols_dtypes):
cpp_metadata.push_back(column_metadata(col_name.encode()))
if is_struct_dtype(col_dtype) or is_list_dtype(col_dtype):
_set_col_children_metadata(col_dtype, cpp_metadata[idx])
else:
raise TypeError(
"An iterable of (column_name, dtype) pairs is required to "
"construct column_metadata"
)
return cpp_metadata
cdef _set_col_children_metadata(dtype,
column_metadata& col_meta):
cdef column_metadata element_metadata
if is_struct_dtype(dtype):
for name, value in dtype.fields.items():
element_metadata = column_metadata(name.encode())
_set_col_children_metadata(
value, element_metadata
)
col_meta.children_meta.push_back(element_metadata)
elif is_list_dtype(dtype):
col_meta.children_meta.reserve(2)
# Offsets - child 0
col_meta.children_meta.push_back(column_metadata())
# Element column - child 1
element_metadata = column_metadata()
_set_col_children_metadata(
dtype.element_type, element_metadata
)
col_meta.children_meta.push_back(element_metadata)
else:
col_meta.children_meta.push_back(column_metadata())
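# Shape of the metadata built above, for a hypothetical column "s" with
# dtype StructDtype({"a": "int64", "b": ListDtype("float32")}):
#
#     column_metadata("s")
#         column_metadata("a")
#         column_metadata("b")
#             column_metadata()      # list offsets, child 0
#             column_metadata()      # list elements, child 1
#
# matching how libcudf expects names for struct fields and list children.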
@acquire_spill_lock()
def to_arrow(list source_columns, object column_dtypes):
"""Convert a list of columns from
cudf Frame to a PyArrow Table.
Parameters
----------
source_columns : a list of columns to convert
column_dtypes : Iterable of ``(column_name, column_dtype)`` pairs
Returns
-------
pyarrow table
"""
cdef vector[column_metadata] cpp_metadata = gather_metadata(column_dtypes)
cdef table_view input_table_view = table_view_from_columns(source_columns)
cdef shared_ptr[CTable] cpp_arrow_table
with nogil:
cpp_arrow_table = cpp_to_arrow(
input_table_view, cpp_metadata
)
return pyarrow_wrap_table(cpp_arrow_table)
@acquire_spill_lock()
def from_arrow(object input_table):
"""Convert from PyArrow Table to a list of columns.
Parameters
----------
input_table : PyArrow table
Returns
-------
A list of columns to construct Frame object
"""
cdef shared_ptr[CTable] cpp_arrow_table = (
pyarrow_unwrap_table(input_table)
)
cdef unique_ptr[table] c_result
with nogil:
c_result = move(cpp_from_arrow(cpp_arrow_table.get()[0]))
return columns_from_unique_ptr(move(c_result))
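# Round-trip sketch (hypothetical names): converting columns to Arrow and
# back preserves the data, with the (name, dtype) pairs supplying the Arrow
# field names.
#
#     tbl = to_arrow([x_col, y_col], [("x", x_col.dtype), ("y", y_col.dtype)])
#     cols = from_arrow(tbl)   # list of Columns in the same order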
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/CMakeLists.txt
|
# =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
set(cython_sources
aggregation.pyx
avro.pyx
binaryop.pyx
column.pyx
concat.pyx
copying.pyx
csv.pyx
datetime.pyx
expressions.pyx
filling.pyx
groupby.pyx
hash.pyx
interop.pyx
join.pyx
json.pyx
labeling.pyx
lists.pyx
merge.pyx
null_mask.pyx
orc.pyx
parquet.pyx
partitioning.pyx
quantiles.pyx
reduce.pyx
replace.pyx
reshape.pyx
rolling.pyx
round.pyx
scalar.pyx
search.pyx
sort.pyx
stream_compaction.pyx
string_casting.pyx
strings_udf.pyx
text.pyx
timezone.pyx
transform.pyx
transpose.pyx
types.pyx
unary.pyx
utils.pyx
)
set(linked_libraries cudf::cudf)
rapids_cython_create_modules(
CXX
SOURCE_FILES "${cython_sources}"
LINKED_LIBRARIES "${linked_libraries}" ASSOCIATED_TARGETS cudf
)
target_link_libraries(strings_udf cudf_strings_udf)
# TODO: Finding NumPy currently requires finding Development due to a bug in CMake. This bug was
# fixed in https://gitlab.kitware.com/cmake/cmake/-/merge_requests/7410 and will be available in
# CMake 3.24, so we can remove the Development component once we upgrade to CMake 3.24.
# find_package(Python REQUIRED COMPONENTS Development NumPy)
# Note: The bug noted above prevents us from finding NumPy successfully using FindPython.cmake
# inside the manylinux images used to build wheels because manylinux images do not contain
# libpython.so and therefore Development cannot be found. Until we upgrade to CMake 3.24, we should
# use FindNumpy.cmake instead (provided by scikit-build). When we switch to 3.24 we can try
# switching back, but it may not work if that implicitly still requires Python libraries. In that
# case we'll need to follow up with the CMake team to remove that dependency. The stopgap solution
# is to unpack the static lib tarballs in the wheel building jobs so that there are at least static
# libs to be found, but that should be a last resort since it implies a dependency that isn't really
# necessary. The relevant command is tar -xf /opt/_internal/static-libs-for-embedding-only.tar.xz -C
# /opt/_internal"
find_package(NumPy REQUIRED)
set(targets_using_dlpack interop)
foreach(target IN LISTS targets_using_dlpack)
target_include_directories(${target} PRIVATE "${DLPACK_INCLUDE_DIR}")
endforeach()
find_package(Python 3.9 REQUIRED COMPONENTS Interpreter)
execute_process(
COMMAND "${Python_EXECUTABLE}" -c "import pyarrow; print(pyarrow.get_include())"
OUTPUT_VARIABLE PYARROW_INCLUDE_DIR
ERROR_VARIABLE PYARROW_ERROR
RESULT_VARIABLE PYARROW_RESULT
OUTPUT_STRIP_TRAILING_WHITESPACE
)
if(${PYARROW_RESULT})
message(FATAL_ERROR "Error while trying to obtain pyarrow include directory:\n${PYARROW_ERROR}")
endif()
# TODO: Due to cudf's scalar.pyx needing to cimport pylibcudf's scalar.pyx (because there are parts
# of cudf Cython that need to directly access the c_obj underlying the pylibcudf Scalar) the
# requirement for arrow headers infects all of cudf. That in turn requires including numpy headers.
# These requirements will go away once all scalar-related Cython code is removed from cudf.
foreach(target IN LISTS RAPIDS_CYTHON_CREATED_TARGETS)
target_include_directories(${target} PRIVATE "${NumPy_INCLUDE_DIRS}")
target_include_directories(${target} PRIVATE "${PYARROW_INCLUDE_DIR}")
endforeach()
add_subdirectory(cpp)
add_subdirectory(io)
add_subdirectory(nvtext)
add_subdirectory(pylibcudf)
add_subdirectory(strings)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/reshape.pyx
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
from cudf.core.buffer import acquire_spill_lock
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport move
from cudf._lib.column cimport Column
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.reshape cimport (
interleave_columns as cpp_interleave_columns,
tile as cpp_tile,
)
from cudf._lib.cpp.table.table cimport table
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.cpp.types cimport size_type
from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns
@acquire_spill_lock()
def interleave_columns(list source_columns):
cdef table_view c_view = table_view_from_columns(source_columns)
cdef unique_ptr[column] c_result
with nogil:
c_result = move(cpp_interleave_columns(c_view))
return Column.from_unique_ptr(move(c_result))
@acquire_spill_lock()
def tile(list source_columns, size_type count):
cdef size_type c_count = count
cdef table_view c_view = table_view_from_columns(source_columns)
cdef unique_ptr[table] c_result
with nogil:
c_result = move(cpp_tile(c_view, c_count))
return columns_from_unique_ptr(move(c_result))
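# Semantics sketch (illustrative values): interleaving the columns
# [1, 2] and [3, 4] yields a single column [1, 3, 2, 4], row by row, while
# tiling the one-column table [1, 2] with count=3 repeats the whole table to
# give [1, 2, 1, 2, 1, 2].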
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/binaryop.pxd
|
# Copyright (c) 2020, NVIDIA CORPORATION.
from libc.stdint cimport int32_t
ctypedef int32_t underlying_type_t_binary_operator
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/types.pyx
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from enum import IntEnum
import numpy as np
from libcpp.memory cimport make_shared, shared_ptr
cimport cudf._lib.cpp.types as libcudf_types
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.lists.lists_column_view cimport lists_column_view
from cudf._lib.types cimport (
underlying_type_t_interpolation,
underlying_type_t_order,
underlying_type_t_sorted,
)
import cudf
from cudf._lib import pylibcudf
size_type_dtype = np.dtype("int32")
class TypeId(IntEnum):
EMPTY = <underlying_type_t_type_id> libcudf_types.type_id.EMPTY
INT8 = <underlying_type_t_type_id> libcudf_types.type_id.INT8
INT16 = <underlying_type_t_type_id> libcudf_types.type_id.INT16
INT32 = <underlying_type_t_type_id> libcudf_types.type_id.INT32
INT64 = <underlying_type_t_type_id> libcudf_types.type_id.INT64
UINT8 = <underlying_type_t_type_id> libcudf_types.type_id.UINT8
UINT16 = <underlying_type_t_type_id> libcudf_types.type_id.UINT16
UINT32 = <underlying_type_t_type_id> libcudf_types.type_id.UINT32
UINT64 = <underlying_type_t_type_id> libcudf_types.type_id.UINT64
FLOAT32 = <underlying_type_t_type_id> libcudf_types.type_id.FLOAT32
FLOAT64 = <underlying_type_t_type_id> libcudf_types.type_id.FLOAT64
BOOL8 = <underlying_type_t_type_id> libcudf_types.type_id.BOOL8
TIMESTAMP_DAYS = (
<underlying_type_t_type_id> libcudf_types.type_id.TIMESTAMP_DAYS
)
TIMESTAMP_SECONDS = (
<underlying_type_t_type_id> libcudf_types.type_id.TIMESTAMP_SECONDS
)
TIMESTAMP_MILLISECONDS = (
<underlying_type_t_type_id> (
libcudf_types.type_id.TIMESTAMP_MILLISECONDS
)
)
TIMESTAMP_MICROSECONDS = (
<underlying_type_t_type_id> (
libcudf_types.type_id.TIMESTAMP_MICROSECONDS
)
)
TIMESTAMP_NANOSECONDS = (
<underlying_type_t_type_id> libcudf_types.type_id.TIMESTAMP_NANOSECONDS
)
DURATION_SECONDS = (
<underlying_type_t_type_id> libcudf_types.type_id.DURATION_SECONDS
)
DURATION_MILLISECONDS = (
<underlying_type_t_type_id> libcudf_types.type_id.DURATION_MILLISECONDS
)
DURATION_MICROSECONDS = (
<underlying_type_t_type_id> libcudf_types.type_id.DURATION_MICROSECONDS
)
DURATION_NANOSECONDS = (
<underlying_type_t_type_id> libcudf_types.type_id.DURATION_NANOSECONDS
)
STRING = <underlying_type_t_type_id> libcudf_types.type_id.STRING
DECIMAL32 = <underlying_type_t_type_id> libcudf_types.type_id.DECIMAL32
DECIMAL64 = <underlying_type_t_type_id> libcudf_types.type_id.DECIMAL64
DECIMAL128 = <underlying_type_t_type_id> libcudf_types.type_id.DECIMAL128
STRUCT = <underlying_type_t_type_id> libcudf_types.type_id.STRUCT
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES = {
np.dtype("int8"): TypeId.INT8,
np.dtype("int16"): TypeId.INT16,
np.dtype("int32"): TypeId.INT32,
np.dtype("int64"): TypeId.INT64,
np.dtype("uint8"): TypeId.UINT8,
np.dtype("uint16"): TypeId.UINT16,
np.dtype("uint32"): TypeId.UINT32,
np.dtype("uint64"): TypeId.UINT64,
np.dtype("float32"): TypeId.FLOAT32,
np.dtype("float64"): TypeId.FLOAT64,
np.dtype("datetime64[s]"): TypeId.TIMESTAMP_SECONDS,
np.dtype("datetime64[ms]"): TypeId.TIMESTAMP_MILLISECONDS,
np.dtype("datetime64[us]"): TypeId.TIMESTAMP_MICROSECONDS,
np.dtype("datetime64[ns]"): TypeId.TIMESTAMP_NANOSECONDS,
np.dtype("object"): TypeId.STRING,
np.dtype("bool"): TypeId.BOOL8,
np.dtype("timedelta64[s]"): TypeId.DURATION_SECONDS,
np.dtype("timedelta64[ms]"): TypeId.DURATION_MILLISECONDS,
np.dtype("timedelta64[us]"): TypeId.DURATION_MICROSECONDS,
np.dtype("timedelta64[ns]"): TypeId.DURATION_NANOSECONDS,
}
SUPPORTED_NUMPY_TO_PYLIBCUDF_TYPES = {
k: pylibcudf.TypeId(v).value
for k, v in SUPPORTED_NUMPY_TO_LIBCUDF_TYPES.items()
}
LIBCUDF_TO_SUPPORTED_NUMPY_TYPES = {
# There's no equivalent to EMPTY in cudf. We translate EMPTY
# columns from libcudf to ``int8`` columns of all nulls in Python.
# ``int8`` is chosen because it uses the least amount of memory.
TypeId.EMPTY: np.dtype("int8"),
TypeId.INT8: np.dtype("int8"),
TypeId.INT16: np.dtype("int16"),
TypeId.INT32: np.dtype("int32"),
TypeId.INT64: np.dtype("int64"),
TypeId.UINT8: np.dtype("uint8"),
TypeId.UINT16: np.dtype("uint16"),
TypeId.UINT32: np.dtype("uint32"),
TypeId.UINT64: np.dtype("uint64"),
TypeId.FLOAT32: np.dtype("float32"),
TypeId.FLOAT64: np.dtype("float64"),
TypeId.BOOL8: np.dtype("bool"),
TypeId.TIMESTAMP_SECONDS: np.dtype("datetime64[s]"),
TypeId.TIMESTAMP_MILLISECONDS: np.dtype("datetime64[ms]"),
TypeId.TIMESTAMP_MICROSECONDS: np.dtype("datetime64[us]"),
TypeId.TIMESTAMP_NANOSECONDS: np.dtype("datetime64[ns]"),
TypeId.DURATION_SECONDS: np.dtype("timedelta64[s]"),
TypeId.DURATION_MILLISECONDS: np.dtype("timedelta64[ms]"),
TypeId.DURATION_MICROSECONDS: np.dtype("timedelta64[us]"),
TypeId.DURATION_NANOSECONDS: np.dtype("timedelta64[ns]"),
TypeId.STRING: np.dtype("object"),
TypeId.STRUCT: np.dtype("object"),
}
PYLIBCUDF_TO_SUPPORTED_NUMPY_TYPES = {
pylibcudf.TypeId(k).value: v
for k, v in LIBCUDF_TO_SUPPORTED_NUMPY_TYPES.items()
}
duration_unit_map = {
TypeId.DURATION_SECONDS: "s",
TypeId.DURATION_MILLISECONDS: "ms",
TypeId.DURATION_MICROSECONDS: "us",
TypeId.DURATION_NANOSECONDS: "ns"
}
datetime_unit_map = {
TypeId.TIMESTAMP_SECONDS: "s",
TypeId.TIMESTAMP_MILLISECONDS: "ms",
TypeId.TIMESTAMP_MICROSECONDS: "us",
TypeId.TIMESTAMP_NANOSECONDS: "ns",
}
class Interpolation(IntEnum):
LINEAR = (
<underlying_type_t_interpolation> libcudf_types.interpolation.LINEAR
)
LOWER = (
<underlying_type_t_interpolation> libcudf_types.interpolation.LOWER
)
HIGHER = (
<underlying_type_t_interpolation> libcudf_types.interpolation.HIGHER
)
MIDPOINT = (
<underlying_type_t_interpolation> libcudf_types.interpolation.MIDPOINT
)
NEAREST = (
<underlying_type_t_interpolation> libcudf_types.interpolation.NEAREST
)
class Order(IntEnum):
ASCENDING = <underlying_type_t_order> libcudf_types.order.ASCENDING
DESCENDING = <underlying_type_t_order> libcudf_types.order.DESCENDING
class Sorted(IntEnum):
YES = <underlying_type_t_sorted> libcudf_types.sorted.YES
NO = <underlying_type_t_sorted> libcudf_types.sorted.NO
class NullOrder(IntEnum):
BEFORE = <underlying_type_t_order> libcudf_types.null_order.BEFORE
AFTER = <underlying_type_t_order> libcudf_types.null_order.AFTER
class NullHandling(IntEnum):
INCLUDE = <underlying_type_t_null_policy> libcudf_types.null_policy.INCLUDE
EXCLUDE = <underlying_type_t_null_policy> libcudf_types.null_policy.EXCLUDE
cdef dtype_from_lists_column_view(column_view cv):
# lists_column_view has no default constructor, so we heap
# allocate it to get around Cython's limitation of requiring
# default constructors for stack allocated objects
cdef shared_ptr[lists_column_view] lv = make_shared[lists_column_view](cv)
cdef column_view child = lv.get()[0].child()
if child.type().id() == libcudf_types.type_id.LIST:
return cudf.ListDtype(dtype_from_lists_column_view(child))
elif child.type().id() == libcudf_types.type_id.EMPTY:
return cudf.ListDtype("int8")
else:
return cudf.ListDtype(
dtype_from_column_view(child)
)
cdef dtype_from_structs_column_view(column_view cv):
fields = {
str(i): dtype_from_column_view(cv.child(i))
for i in range(cv.num_children())
}
return cudf.StructDtype(fields)
cdef dtype_from_column_view(column_view cv):
cdef libcudf_types.type_id tid = cv.type().id()
if tid == libcudf_types.type_id.LIST:
return dtype_from_lists_column_view(cv)
elif tid == libcudf_types.type_id.STRUCT:
return dtype_from_structs_column_view(cv)
elif tid == libcudf_types.type_id.DECIMAL64:
return cudf.Decimal64Dtype(
precision=cudf.Decimal64Dtype.MAX_PRECISION,
scale=-cv.type().scale()
)
elif tid == libcudf_types.type_id.DECIMAL32:
return cudf.Decimal32Dtype(
precision=cudf.Decimal32Dtype.MAX_PRECISION,
scale=-cv.type().scale()
)
elif tid == libcudf_types.type_id.DECIMAL128:
return cudf.Decimal128Dtype(
precision=cudf.Decimal128Dtype.MAX_PRECISION,
scale=-cv.type().scale()
)
else:
return LIBCUDF_TO_SUPPORTED_NUMPY_TYPES[
<underlying_type_t_type_id>(tid)
]
cdef libcudf_types.data_type dtype_to_data_type(dtype) except *:
cdef libcudf_types.type_id tid
if cudf.api.types.is_list_dtype(dtype):
tid = libcudf_types.type_id.LIST
elif cudf.api.types.is_struct_dtype(dtype):
tid = libcudf_types.type_id.STRUCT
elif cudf.api.types.is_decimal128_dtype(dtype):
tid = libcudf_types.type_id.DECIMAL128
elif cudf.api.types.is_decimal64_dtype(dtype):
tid = libcudf_types.type_id.DECIMAL64
elif cudf.api.types.is_decimal32_dtype(dtype):
tid = libcudf_types.type_id.DECIMAL32
else:
tid = <libcudf_types.type_id> (
<underlying_type_t_type_id> (
SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[np.dtype(dtype)]))
if is_decimal_type_id(tid):
return libcudf_types.data_type(tid, -dtype.scale)
else:
return libcudf_types.data_type(tid)
cpdef dtype_to_pylibcudf_type(dtype):
if cudf.api.types.is_list_dtype(dtype):
return pylibcudf.DataType(pylibcudf.TypeId.LIST)
elif cudf.api.types.is_struct_dtype(dtype):
return pylibcudf.DataType(pylibcudf.TypeId.STRUCT)
elif cudf.api.types.is_decimal_dtype(dtype):
if cudf.api.types.is_decimal128_dtype(dtype):
tid = pylibcudf.TypeId.DECIMAL128
elif cudf.api.types.is_decimal64_dtype(dtype):
tid = pylibcudf.TypeId.DECIMAL64
else:
tid = pylibcudf.TypeId.DECIMAL32
return pylibcudf.DataType(tid, -dtype.scale)
return pylibcudf.DataType(
SUPPORTED_NUMPY_TO_PYLIBCUDF_TYPES[np.dtype(dtype)]
)
cdef bool is_decimal_type_id(libcudf_types.type_id tid) except *:
return tid in (
libcudf_types.type_id.DECIMAL128,
libcudf_types.type_id.DECIMAL64,
libcudf_types.type_id.DECIMAL32,
)
def dtype_from_pylibcudf_lists_column(col):
child = col.list_view().child()
tid = child.type().id()
if tid == pylibcudf.TypeId.LIST:
return cudf.ListDtype(dtype_from_pylibcudf_lists_column(child))
elif tid == pylibcudf.TypeId.EMPTY:
return cudf.ListDtype("int8")
else:
return cudf.ListDtype(
dtype_from_pylibcudf_column(child)
)
def dtype_from_pylibcudf_structs_column(col):
fields = {
str(i): dtype_from_pylibcudf_column(col.child(i))
for i in range(col.num_children())
}
return cudf.StructDtype(fields)
def dtype_from_pylibcudf_column(col):
type_ = col.type()
tid = type_.id()
if tid == pylibcudf.TypeId.LIST:
return dtype_from_pylibcudf_lists_column(col)
elif tid == pylibcudf.TypeId.STRUCT:
return dtype_from_pylibcudf_structs_column(col)
elif tid == pylibcudf.TypeId.DECIMAL64:
return cudf.Decimal64Dtype(
precision=cudf.Decimal64Dtype.MAX_PRECISION,
scale=-type_.scale()
)
elif tid == pylibcudf.TypeId.DECIMAL32:
return cudf.Decimal32Dtype(
precision=cudf.Decimal32Dtype.MAX_PRECISION,
scale=-type_.scale()
)
elif tid == pylibcudf.TypeId.DECIMAL128:
return cudf.Decimal128Dtype(
precision=cudf.Decimal128Dtype.MAX_PRECISION,
scale=-type_.scale()
)
else:
return PYLIBCUDF_TO_SUPPORTED_NUMPY_TYPES[
<underlying_type_t_type_id>(tid)
]
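# Worked examples of the mappings above (illustrative):
#     np.dtype("datetime64[ms]")  -> TypeId.TIMESTAMP_MILLISECONDS
#     cudf.Decimal64Dtype(10, 2)  -> data_type(DECIMAL64, scale=-2)
#     libcudf LIST<INT32> view    -> cudf.ListDtype("int32")
# libcudf stores decimal scales negated relative to cudf dtypes, which is why
# dtype_to_data_type passes -dtype.scale and the *_from_* helpers negate
# cv.type().scale() again on the way back.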
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/transpose.pyx
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
from libcpp.memory cimport unique_ptr
from libcpp.pair cimport pair
from libcpp.utility cimport move
from cudf._lib.column cimport Column
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.cpp.transpose cimport transpose as cpp_transpose
from cudf._lib.utils cimport columns_from_table_view, table_view_from_columns
def transpose(list source_columns):
"""Transpose m n-row columns into n m-row columns
"""
cdef pair[unique_ptr[column], table_view] c_result
cdef table_view c_input = table_view_from_columns(source_columns)
with nogil:
c_result = move(cpp_transpose(c_input))
# Notice, the data pointer of `result_owner` has been exposed
# through `c_result.second` at this point.
result_owner = Column.from_unique_ptr(
move(c_result.first), data_ptr_exposed=True
)
return columns_from_table_view(
c_result.second,
owners=[result_owner] * c_result.second.num_columns()
)
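# Shape sketch (illustrative values): transposing three 2-row columns
# [1, 2], [3, 4], [5, 6] yields two 3-row columns [1, 3, 5] and [2, 4, 6].
# Every returned column aliases result_owner's buffer, hence the
# data_ptr_exposed=True bookkeeping above.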
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/filling.pyx
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
from cudf.core.buffer import acquire_spill_lock
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport move
cimport cudf._lib.cpp.filling as cpp_filling
from cudf._lib.column cimport Column
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.column.column_view cimport column_view, mutable_column_view
from cudf._lib.cpp.scalar.scalar cimport scalar
from cudf._lib.cpp.table.table cimport table
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.cpp.types cimport size_type
from cudf._lib.scalar cimport DeviceScalar
from cudf._lib.utils cimport columns_from_unique_ptr, table_view_from_columns
@acquire_spill_lock()
def fill_in_place(Column destination, int begin, int end, DeviceScalar value):
cdef mutable_column_view c_destination = destination.mutable_view()
cdef size_type c_begin = <size_type> begin
cdef size_type c_end = <size_type> end
cdef const scalar* c_value = value.get_raw_ptr()
cpp_filling.fill_in_place(
c_destination,
c_begin,
c_end,
c_value[0]
)
@acquire_spill_lock()
def fill(Column destination, int begin, int end, DeviceScalar value):
cdef column_view c_destination = destination.view()
cdef size_type c_begin = <size_type> begin
cdef size_type c_end = <size_type> end
cdef const scalar* c_value = value.get_raw_ptr()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(cpp_filling.fill(
c_destination,
c_begin,
c_end,
c_value[0]
))
return Column.from_unique_ptr(move(c_result))
@acquire_spill_lock()
def repeat(list inp, object count):
if isinstance(count, Column):
return _repeat_via_column(inp, count)
else:
return _repeat_via_size_type(inp, count)
def _repeat_via_column(list inp, Column count):
cdef table_view c_inp = table_view_from_columns(inp)
cdef column_view c_count = count.view()
cdef unique_ptr[table] c_result
with nogil:
c_result = move(cpp_filling.repeat(
c_inp,
c_count,
))
return columns_from_unique_ptr(move(c_result))
def _repeat_via_size_type(list inp, size_type count):
cdef table_view c_inp = table_view_from_columns(inp)
cdef unique_ptr[table] c_result
with nogil:
c_result = move(cpp_filling.repeat(
c_inp,
count
))
return columns_from_unique_ptr(move(c_result))
@acquire_spill_lock()
def sequence(int size, DeviceScalar init, DeviceScalar step):
cdef size_type c_size = size
cdef const scalar* c_init = init.get_raw_ptr()
cdef const scalar* c_step = step.get_raw_ptr()
cdef unique_ptr[column] c_result
with nogil:
c_result = move(cpp_filling.sequence(
c_size,
c_init[0],
c_step[0]
))
return Column.from_unique_ptr(move(c_result))
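# Behaviour sketch (illustrative values): fill returns a copy with rows
# [begin, end) replaced by the scalar, while fill_in_place mutates the input
# column. A sequence of size 4 with init=10 and step=2 produces
# [10, 12, 14, 16], and repeating the one-column table [1, 2] with count=3
# gives [1, 1, 1, 2, 2, 2] (each row repeated consecutively).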
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/scalar.pxd
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from rmm._lib.memory_resource cimport DeviceMemoryResource
# TODO: Would like to remove this cimport, but it will require some more work
# to excise all C code in scalar.pyx that relies on using the C API of the
# pylibcudf Scalar underlying the DeviceScalar.
from cudf._lib cimport pylibcudf
from cudf._lib.cpp.scalar.scalar cimport scalar
cdef class DeviceScalar:
cdef pylibcudf.Scalar c_value
cdef object _dtype
cdef const scalar* get_raw_ptr(self) except *
@staticmethod
cdef DeviceScalar from_unique_ptr(unique_ptr[scalar] ptr, dtype=*)
cpdef bool is_valid(DeviceScalar s)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/expressions.pxd
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
from libc.stdint cimport int32_t, int64_t
from libcpp.memory cimport unique_ptr
from cudf._lib.cpp.expressions cimport (
column_reference,
expression,
literal,
operation,
)
from cudf._lib.cpp.scalar.scalar cimport numeric_scalar, scalar, string_scalar
cdef class Expression:
cdef unique_ptr[expression] c_obj
cdef class Literal(Expression):
cdef unique_ptr[scalar] c_scalar
cdef class ColumnReference(Expression):
pass
cdef class Operation(Expression):
pass
cdef class ColumnNameReference(Expression):
pass
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/reduce.pyx
|
# Copyright (c) 2020-2022, NVIDIA CORPORATION.
from cython.operator import dereference
import cudf
from cudf.core.buffer import acquire_spill_lock
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport move, pair
from cudf._lib.aggregation cimport (
ReduceAggregation,
ScanAggregation,
make_reduce_aggregation,
make_scan_aggregation,
)
from cudf._lib.column cimport Column
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.reduce cimport cpp_minmax, cpp_reduce, cpp_scan, scan_type
from cudf._lib.cpp.scalar.scalar cimport scalar
from cudf._lib.cpp.types cimport data_type
from cudf._lib.scalar cimport DeviceScalar
from cudf._lib.types cimport dtype_to_data_type, is_decimal_type_id
@acquire_spill_lock()
def reduce(reduction_op, Column incol, dtype=None, **kwargs):
"""
Top level Cython reduce function wrapping libcudf reductions.
Parameters
----------
reduction_op : string
A string specifying the operation, e.g. sum, prod
incol : Column
A cuDF Column object
dtype: numpy.dtype, optional
A numpy data type to use for the output, defaults
to the same type as the input column
"""
col_dtype = (
dtype if dtype is not None
else incol._reduction_result_dtype(reduction_op)
)
cdef column_view c_incol_view = incol.view()
cdef unique_ptr[scalar] c_result
cdef ReduceAggregation cython_agg = make_reduce_aggregation(
reduction_op, kwargs)
cdef data_type c_out_dtype = dtype_to_data_type(col_dtype)
# check empty case
if len(incol) <= incol.null_count:
if reduction_op == 'sum' or reduction_op == 'sum_of_squares':
return incol.dtype.type(0)
if reduction_op == 'product':
return incol.dtype.type(1)
if reduction_op == "any":
return False
return cudf.utils.dtypes._get_nan_for_dtype(col_dtype)
with nogil:
c_result = move(cpp_reduce(
c_incol_view,
dereference(cython_agg.c_obj),
c_out_dtype
))
if is_decimal_type_id(c_result.get()[0].type().id()):
scale = -c_result.get()[0].type().scale()
precision = _reduce_precision(col_dtype, reduction_op, len(incol))
py_result = DeviceScalar.from_unique_ptr(
move(c_result), dtype=col_dtype.__class__(precision, scale)
)
else:
py_result = DeviceScalar.from_unique_ptr(move(c_result))
return py_result.value
@acquire_spill_lock()
def scan(scan_op, Column incol, inclusive, **kwargs):
"""
Top level Cython scan function wrapping libcudf scans.
Parameters
----------
incol : Column
A cuDF Column object
scan_op : string
A string specifying the operation, e.g. cumprod
inclusive: bool
Flag for including nulls in relevant scan
"""
cdef column_view c_incol_view = incol.view()
cdef unique_ptr[column] c_result
cdef ScanAggregation cython_agg = make_scan_aggregation(scan_op, kwargs)
cdef scan_type c_inclusive = \
scan_type.INCLUSIVE if inclusive else scan_type.EXCLUSIVE
with nogil:
c_result = move(cpp_scan(
c_incol_view,
dereference(cython_agg.c_obj),
c_inclusive
))
py_result = Column.from_unique_ptr(move(c_result))
return py_result
@acquire_spill_lock()
def minmax(Column incol):
"""
Top level Cython minmax function wrapping libcudf minmax.
Parameters
----------
incol : Column
A cuDF Column object
Returns
-------
A pair of ``(min, max)`` values of ``incol``
"""
cdef column_view c_incol_view = incol.view()
cdef pair[unique_ptr[scalar], unique_ptr[scalar]] c_result
with nogil:
c_result = move(cpp_minmax(c_incol_view))
py_result_min = DeviceScalar.from_unique_ptr(move(c_result.first))
py_result_max = DeviceScalar.from_unique_ptr(move(c_result.second))
return (
cudf.Scalar.from_device_scalar(py_result_min),
cudf.Scalar.from_device_scalar(py_result_max)
)
def _reduce_precision(dtype, op, nrows):
"""
Returns the result precision when performing the reduce
operation `op` for the given dtype and column size.
See: https://docs.microsoft.com/en-us/sql/t-sql/data-types/precision-scale-and-length-transact-sql
""" # noqa: E501
p = dtype.precision
if op in ("min", "max"):
new_p = p
elif op == "sum":
new_p = p + nrows - 1
elif op == "product":
new_p = p * nrows + nrows - 1
elif op == "sum_of_squares":
new_p = 2 * p + nrows
else:
raise NotImplementedError()
return max(min(new_p, dtype.MAX_PRECISION), 0)
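# Worked example (hypothetical input): for a Decimal64Dtype with precision 5
# over a 10-row column, "sum" gives new_p = 5 + 10 - 1 = 14, which is below
# Decimal64Dtype.MAX_PRECISION (18) and is returned unchanged, while
# "product" gives 5 * 10 + 10 - 1 = 59 and is clamped to 18.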
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/transform.pyx
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from numba.np import numpy_support
import cudf
from cudf._lib.types import SUPPORTED_NUMPY_TO_LIBCUDF_TYPES
from cudf.core._internals.expressions import parse_expression
from cudf.core.buffer import acquire_spill_lock, as_buffer
from cudf.utils import cudautils
from cython.operator cimport dereference
from libc.stdint cimport uintptr_t
from libcpp.memory cimport unique_ptr
from libcpp.pair cimport pair
from libcpp.string cimport string
from libcpp.utility cimport move
from rmm._lib.device_buffer cimport DeviceBuffer, device_buffer
cimport cudf._lib.cpp.transform as libcudf_transform
from cudf._lib.column cimport Column
from cudf._lib.cpp.column.column cimport column
from cudf._lib.cpp.column.column_view cimport column_view
from cudf._lib.cpp.expressions cimport expression
from cudf._lib.cpp.table.table cimport table
from cudf._lib.cpp.table.table_view cimport table_view
from cudf._lib.cpp.types cimport bitmask_type, data_type, size_type, type_id
from cudf._lib.expressions cimport Expression
from cudf._lib.types cimport underlying_type_t_type_id
from cudf._lib.utils cimport (
columns_from_unique_ptr,
data_from_table_view,
table_view_from_columns,
)
@acquire_spill_lock()
def bools_to_mask(Column col):
"""
Given an int8 (boolean) column, compress the data from booleans to bits and
return a Buffer
"""
cdef column_view col_view = col.view()
cdef pair[unique_ptr[device_buffer], size_type] cpp_out
cdef unique_ptr[device_buffer] up_db
with nogil:
cpp_out = move(libcudf_transform.bools_to_mask(col_view))
up_db = move(cpp_out.first)
rmm_db = DeviceBuffer.c_from_unique_ptr(move(up_db))
buf = as_buffer(rmm_db)
return buf
@acquire_spill_lock()
def mask_to_bools(object mask_buffer, size_type begin_bit, size_type end_bit):
"""
Given a mask buffer, returns a boolean column representing bit 0 -> False
and 1 -> True within the range [begin_bit, end_bit).
"""
if not isinstance(mask_buffer, cudf.core.buffer.Buffer):
raise TypeError("mask_buffer is not an instance of "
"cudf.core.buffer.Buffer")
cdef bitmask_type* bit_mask = <bitmask_type*><uintptr_t>(
mask_buffer.get_ptr(mode="read")
)
cdef unique_ptr[column] result
with nogil:
result = move(
libcudf_transform.mask_to_bools(bit_mask, begin_bit, end_bit)
)
return Column.from_unique_ptr(move(result))
@acquire_spill_lock()
def nans_to_nulls(Column input):
cdef column_view c_input = input.view()
cdef pair[unique_ptr[device_buffer], size_type] c_output
cdef unique_ptr[device_buffer] c_buffer
with nogil:
c_output = move(libcudf_transform.nans_to_nulls(c_input))
c_buffer = move(c_output.first)
if c_output.second == 0:
return None
return as_buffer(DeviceBuffer.c_from_unique_ptr(move(c_buffer)))
@acquire_spill_lock()
def transform(Column input, op):
cdef column_view c_input = input.view()
cdef string c_str
cdef type_id c_tid
cdef data_type c_dtype
nb_type = numpy_support.from_dtype(input.dtype)
nb_signature = (nb_type,)
compiled_op = cudautils.compile_udf(op, nb_signature)
c_str = compiled_op[0].encode('UTF-8')
np_dtype = cudf.dtype(compiled_op[1])
try:
c_tid = <type_id> (
<underlying_type_t_type_id> SUPPORTED_NUMPY_TO_LIBCUDF_TYPES[
np_dtype
]
)
c_dtype = data_type(c_tid)
except KeyError:
raise TypeError(
"Result of window function has unsupported dtype {}"
.format(np_dtype)
)
with nogil:
c_output = move(libcudf_transform.transform(
c_input,
c_str,
c_dtype,
True
))
return Column.from_unique_ptr(move(c_output))
def table_encode(list source_columns):
cdef table_view c_input = table_view_from_columns(source_columns)
cdef pair[unique_ptr[table], unique_ptr[column]] c_result
with nogil:
c_result = move(libcudf_transform.encode(c_input))
return (
columns_from_unique_ptr(move(c_result.first)),
Column.from_unique_ptr(move(c_result.second))
)
def one_hot_encode(Column input_column, Column categories):
cdef column_view c_view_input = input_column.view()
cdef column_view c_view_categories = categories.view()
cdef pair[unique_ptr[column], table_view] c_result
with nogil:
c_result = move(
libcudf_transform.one_hot_encode(c_view_input, c_view_categories)
)
# Notice, the data pointer of `owner` has been exposed
# through `c_result.second` at this point.
owner = Column.from_unique_ptr(
move(c_result.first), data_ptr_exposed=True
)
pylist_categories = categories.to_arrow().to_pylist()
encodings, _ = data_from_table_view(
move(c_result.second),
owner=owner,
column_names=[
x if x is not None else '<NA>' for x in pylist_categories
]
)
return encodings
@acquire_spill_lock()
def compute_column(list columns, tuple column_names, expr: str):
"""Compute a new column by evaluating an expression on a set of columns.
Parameters
----------
columns : list
The set of columns forming the table to evaluate the expression on.
column_names : tuple[str]
The names associated with each column. These names are necessary to map
column names in the expression to indices in the provided list of
columns, which are what will be used by libcudf to evaluate the
expression on the table.
expr : str
The expression to evaluate.
"""
visitor = parse_expression(expr, column_names)
# At the end, all the stack contains is the expression to evaluate.
cdef Expression cudf_expr = visitor.expression
cdef table_view tbl = table_view_from_columns(columns)
cdef unique_ptr[column] col
with nogil:
col = move(
libcudf_transform.compute_column(
tbl,
<expression &> dereference(cudf_expr.c_obj.get())
)
)
return Column.from_unique_ptr(move(col))
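# Hypothetical usage sketch: given numeric columns bound to the names
# "a" and "b",
#
#     compute_column([col_a, col_b], ("a", "b"), "a + b * 2")
#
# parses the string into a libcudf expression tree and returns a new Column
# holding the evaluated result; the name tuple only maps identifiers in the
# expression to positions in the column list.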
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/_lib/copying.pxd
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
from cudf._lib.cpp.contiguous_split cimport packed_columns
cdef class _CPackedColumns:
cdef packed_columns c_obj
cdef object column_names
cdef object column_dtypes
cdef object index_names
| 0 |