repo_id (string, lengths 21–96) | file_path (string, lengths 31–155) | content (string, lengths 1–92.9M) | __index_level_0__ (int64, all 0)
---|---|---|---
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_combining.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/strings/test_string_methods.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/general_functions/test_conversion.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/general_functions/test_data_manipulation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/general_functions/test_datetimelike.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/window/test_rolling.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/lists/test_list_methods.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_sorting.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_constructing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_conversion.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_interval.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest

import cudf
from cudf.core._compat import PANDAS_GE_210
from cudf.core.index import IntervalIndex, interval_range
from cudf.testing._utils import assert_eq


def test_interval_constructor_default_closed():
    idx = cudf.IntervalIndex([pd.Interval(0, 1)])
    assert idx.closed == "right"
    assert idx.dtype.closed == "right"


def test_interval_to_arrow():
    expect = pa.Array.from_pandas(pd.IntervalIndex([pd.Interval(0, 1)]))
    got = cudf.IntervalIndex([pd.Interval(0, 1)]).to_arrow()
    assert_eq(expect, got)


INTERVAL_BOUNDARY_TYPES = [
    int,
    np.int8,
    np.int16,
    np.int32,
    np.int64,
    np.float32,
    np.float64,
    cudf.Scalar,
]


@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"])
@pytest.mark.parametrize("start", [0, 1, 2, 3])
@pytest.mark.parametrize("end", [4, 5, 6, 7])
def test_interval_range_basic(start, end, closed):
    pindex = pd.interval_range(start=start, end=end, closed=closed)
    gindex = cudf.interval_range(start=start, end=end, closed=closed)
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("start_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("end_t", INTERVAL_BOUNDARY_TYPES)
def test_interval_range_dtype_basic(start_t, end_t):
    start, end = start_t(24), end_t(42)
    start_val = start.value if isinstance(start, cudf.Scalar) else start
    end_val = end.value if isinstance(end, cudf.Scalar) else end
    pindex = pd.interval_range(start=start_val, end=end_val, closed="left")
    gindex = cudf.interval_range(start=start, end=end, closed="left")
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"])
@pytest.mark.parametrize("start", [0])
@pytest.mark.parametrize("end", [0])
def test_interval_range_empty(start, end, closed):
    pindex = pd.interval_range(start=start, end=end, closed=closed)
    gindex = cudf.interval_range(start=start, end=end, closed=closed)
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"])
@pytest.mark.parametrize("freq", [1, 2, 3])
@pytest.mark.parametrize("start", [0, 1, 2, 3, 5])
@pytest.mark.parametrize("end", [6, 8, 10, 43, 70])
def test_interval_range_freq_basic(start, end, freq, closed):
    pindex = pd.interval_range(start=start, end=end, freq=freq, closed=closed)
    gindex = cudf.interval_range(
        start=start, end=end, freq=freq, closed=closed
    )
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("start_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("end_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("freq_t", INTERVAL_BOUNDARY_TYPES)
def test_interval_range_freq_basic_dtype(start_t, end_t, freq_t):
    start, end, freq = start_t(5), end_t(70), freq_t(3)
    start_val = start.value if isinstance(start, cudf.Scalar) else start
    end_val = end.value if isinstance(end, cudf.Scalar) else end
    freq_val = freq.value if isinstance(freq, cudf.Scalar) else freq
    pindex = pd.interval_range(
        start=start_val, end=end_val, freq=freq_val, closed="left"
    )
    gindex = cudf.interval_range(
        start=start, end=end, freq=freq, closed="left"
    )
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"])
@pytest.mark.parametrize("periods", [1, 1.0, 2, 2.0, 3.0, 3])
@pytest.mark.parametrize("start", [0, 0.0, 1.0, 1, 2, 2.0, 3.0, 3])
@pytest.mark.parametrize("end", [4, 4.0, 5.0, 5, 6, 6.0, 7.0, 7])
def test_interval_range_periods_basic(start, end, periods, closed):
    pindex = pd.interval_range(
        start=start, end=end, periods=periods, closed=closed
    )
    gindex = cudf.interval_range(
        start=start, end=end, periods=periods, closed=closed
    )
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("start_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("end_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("periods_t", INTERVAL_BOUNDARY_TYPES)
def test_interval_range_periods_basic_dtype(start_t, end_t, periods_t):
    start, end, periods = start_t(0), end_t(4), periods_t(1.0)
    start_val = start.value if isinstance(start, cudf.Scalar) else start
    end_val = end.value if isinstance(end, cudf.Scalar) else end
    periods_val = (
        periods.value if isinstance(periods, cudf.Scalar) else periods
    )
    pindex = pd.interval_range(
        start=start_val, end=end_val, periods=periods_val, closed="left"
    )
    gindex = cudf.interval_range(
        start=start, end=end, periods=periods, closed="left"
    )
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"])
@pytest.mark.parametrize("periods", [1, 2, 3])
@pytest.mark.parametrize("freq", [1, 2, 3, 4])
@pytest.mark.parametrize("end", [4, 8, 9, 10])
def test_interval_range_periods_freq_end(end, freq, periods, closed):
    pindex = pd.interval_range(
        end=end, freq=freq, periods=periods, closed=closed
    )
    gindex = cudf.interval_range(
        end=end, freq=freq, periods=periods, closed=closed
    )
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("periods_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("freq_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("end_t", INTERVAL_BOUNDARY_TYPES)
def test_interval_range_periods_freq_end_dtype(periods_t, freq_t, end_t):
    periods, freq, end = periods_t(2), freq_t(3), end_t(10)
    freq_val = freq.value if isinstance(freq, cudf.Scalar) else freq
    end_val = end.value if isinstance(end, cudf.Scalar) else end
    periods_val = (
        periods.value if isinstance(periods, cudf.Scalar) else periods
    )
    pindex = pd.interval_range(
        end=end_val, freq=freq_val, periods=periods_val, closed="left"
    )
    gindex = cudf.interval_range(
        end=end, freq=freq, periods=periods, closed="left"
    )
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"])
@pytest.mark.parametrize("periods", [1, 2, 3])
@pytest.mark.parametrize("freq", [1, 2, 3, 4])
@pytest.mark.parametrize("start", [1, 4, 9, 12])
def test_interval_range_periods_freq_start(start, freq, periods, closed):
    pindex = pd.interval_range(
        start=start, freq=freq, periods=periods, closed=closed
    )
    gindex = cudf.interval_range(
        start=start, freq=freq, periods=periods, closed=closed
    )
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("periods_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("freq_t", INTERVAL_BOUNDARY_TYPES)
@pytest.mark.parametrize("start_t", INTERVAL_BOUNDARY_TYPES)
def test_interval_range_periods_freq_start_dtype(periods_t, freq_t, start_t):
    periods, freq, start = periods_t(2), freq_t(3), start_t(9)
    freq_val = freq.value if isinstance(freq, cudf.Scalar) else freq
    start_val = start.value if isinstance(start, cudf.Scalar) else start
    periods_val = (
        periods.value if isinstance(periods, cudf.Scalar) else periods
    )
    pindex = pd.interval_range(
        start=start_val, freq=freq_val, periods=periods_val, closed="left"
    )
    gindex = cudf.interval_range(
        start=start, freq=freq, periods=periods, closed="left"
    )
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["right", "left", "both", "neither"])
@pytest.mark.parametrize(
    "data",
    [
        ([pd.Interval(30, 50)]),
        ([pd.Interval(0, 3), pd.Interval(1, 7)]),
        ([pd.Interval(0.2, 60.3), pd.Interval(1, 7), pd.Interval(0, 0)]),
        ([]),
    ],
)
def test_interval_index_basic(data, closed):
    pindex = pd.IntervalIndex(data, closed=closed)
    gindex = IntervalIndex(data, closed=closed)
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["right", "left", "both", "neither"])
def test_interval_index_empty(closed):
    pindex = pd.IntervalIndex([], closed=closed)
    gindex = IntervalIndex([], closed=closed)
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["right", "left", "both", "neither"])
@pytest.mark.parametrize(
    "data",
    [
        ([pd.Interval(1, 6), pd.Interval(1, 10), pd.Interval(1, 3)]),
        (
            [
                pd.Interval(3.5, 6.0),
                pd.Interval(1.0, 7.0),
                pd.Interval(0.0, 10.0),
            ]
        ),
        (
            [
                pd.Interval(50, 100, closed="left"),
                pd.Interval(1.0, 7.0, closed="left"),
                pd.Interval(16, 322, closed="left"),
            ]
        ),
        (
            [
                pd.Interval(50, 100, closed="right"),
                pd.Interval(1.0, 7.0, closed="right"),
                pd.Interval(16, 322, closed="right"),
            ]
        ),
    ],
)
def test_interval_index_many_params(data, closed):
    pindex = pd.IntervalIndex(data, closed=closed)
    gindex = IntervalIndex(data, closed=closed)
    assert_eq(pindex, gindex)


@pytest.mark.parametrize("closed", ["left", "right", "both", "neither"])
def test_interval_index_from_breaks(closed):
    breaks = [0, 3, 6, 10]
    pindex = pd.IntervalIndex.from_breaks(breaks, closed=closed)
    gindex = IntervalIndex.from_breaks(breaks, closed=closed)
    assert_eq(pindex, gindex)


@pytest.mark.parametrize(
    "start, stop, freq, periods",
    [
        (0.0, None, 0.2, 5),
        (0.0, 1.0, None, 5),
        pytest.param(
            0.0,
            1.0,
            0.2,
            None,
            marks=pytest.mark.xfail(
                condition=not PANDAS_GE_210,
                reason="https://github.com/pandas-dev/pandas/pull/54477",
            ),
        ),
        (None, 1.0, 0.2, 5),
        pytest.param(
            0.0,
            1.0,
            0.1,
            None,
            marks=pytest.mark.xfail(
                condition=not PANDAS_GE_210,
                reason="https://github.com/pandas-dev/pandas/pull/54477",
            ),
        ),
        (0.0, 1.0, None, 10),
        (0.0, None, 0.25, 4),
        (1.0, None, 2.5, 2),
    ],
)
def test_interval_range_floating(start, stop, freq, periods):
    expected = pd.interval_range(
        start=start, end=stop, freq=freq, periods=periods
    )
    got = interval_range(start=start, end=stop, freq=freq, periods=periods)
    assert_eq(expected, got)


def test_intervalindex_empty_typed_non_int():
    data = np.array([], dtype="datetime64[ns]")
    result = cudf.IntervalIndex(data)
    expected = pd.IntervalIndex(data)
    assert_eq(result, expected)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_properties.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_modifying.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_computation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_missing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_numeric.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_selecting.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_memory_usage.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_categorical.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_time_specific.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_multiindex_compat.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/test_combining.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/timedelta/test_constructing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/timedelta/test_conversion.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/timedelta/test_components.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/multiindex/test_constructing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/multiindex/test_properties.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/multiindex/test_selecting.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/datetime/test_constructing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/datetime/test_conversion.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/datetime/test_indexing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import pandas as pd

import cudf
from cudf.testing._utils import assert_eq


def test_slice_datetimetz_index():
    data = ["2001-01-01", "2001-01-02", None, None, "2001-01-03"]
    pidx = pd.DatetimeIndex(data, dtype="datetime64[ns]").tz_localize(
        "US/Eastern"
    )
    idx = cudf.DatetimeIndex(data, dtype="datetime64[ns]").tz_localize(
        "US/Eastern"
    )
    expected = pidx[1:4]
    got = idx[1:4]
    assert_eq(expected, got)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/datetime/test_components.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/indexes/datetime/test_time_specific.py
|
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
import pandas as pd

import cudf
from cudf.testing._utils import assert_eq


def test_tz_localize():
    pidx = pd.date_range("2001-01-01", "2001-01-02", freq="1s")
    pidx = pidx.astype("<M8[ns]")
    idx = cudf.from_pandas(pidx)
    assert pidx.dtype == idx.dtype
    assert_eq(
        pidx.tz_localize("America/New_York"),
        idx.tz_localize("America/New_York"),
    )


def test_tz_convert():
    pidx = pd.date_range("2023-01-01", periods=3, freq="H")
    idx = cudf.from_pandas(pidx)
    pidx = pidx.tz_localize("UTC")
    idx = idx.tz_localize("UTC")
    assert_eq(
        pidx.tz_convert("America/New_York"), idx.tz_convert("America/New_York")
    )


def test_delocalize_naive():
    pidx = pd.date_range("2023-01-01", periods=3, freq="H")
    idx = cudf.from_pandas(pidx)
    assert_eq(pidx.tz_localize(None), idx.tz_localize(None))
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/groupby/test_agg.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import numpy as np
import pytest

import cudf


@pytest.mark.parametrize(
    "empty",
    [True, False],
    ids=["empty", "nonempty"],
)
def test_agg_count_dtype(empty):
    df = cudf.DataFrame({"a": [1, 2, 1], "c": ["a", "b", "c"]})
    if empty:
        df = df.iloc[:0]
    result = df.groupby("a").agg({"c": "count"})
    assert result["c"].dtype == np.dtype("int64")
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/groupby/test_computation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import pandas as pd
import pytest

import cudf
from cudf.testing._utils import assert_eq


@pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"])
def test_rank_return_type_compatible_mode(method):
    # in compatible mode, rank() always returns floats
    pdf = pd.DataFrame({"a": [1, 1, 1, 2, 2], "b": [1, 2, 3, 4, 5]})
    with cudf.option_context("mode.pandas_compatible", True):
        df = cudf.from_pandas(pdf)
        result = df.groupby("a").rank(method=method)
    expect = pdf.groupby("a").rank(method=method)
    assert_eq(expect, result)
    assert result["b"].dtype == "float64"
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/groupby/test_stats.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/groupby/test_indexing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/groupby/test_function_application.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/general_utilities/test_testing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_sorting.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_constructing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import numpy as np

import cudf


def test_construct_int_series_with_nulls_compat_mode():
    # in compatibility mode, constructing a Series
    # with nulls should result in a floating Series:
    with cudf.option_context("mode.pandas_compatible", True):
        s = cudf.Series([1, 2, None])
    assert s.dtype == np.dtype("float64")
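# Illustrative sketch, not part of the original file: the complementary default
# behavior, for contrast with the compatibility-mode test above. Outside of
# "mode.pandas_compatible", cudf keeps the requested integer dtype and stores
# the missing entry as a null instead of upcasting to float. The test name is
# hypothetical.
def test_construct_int_series_with_nulls_default_mode():
    s = cudf.Series([1, 2, None])
    # default mode: integer columns can hold nulls, so no upcast to float64
    assert s.dtype == np.dtype("int64")
    assert s.isnull().sum() == 1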
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_conversion.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import pandas as pd
import pytest

import cudf
from cudf.testing._utils import assert_eq


@pytest.mark.parametrize(
    "data, dtype",
    [
        ([1, 2, 3], "int8"),
        ([1, 2, 3], "int64"),
        ([1.1, 2.2, 3.3], "float32"),
        ([1.0, 2.0, 3.0], "float32"),
        ([1.0, 2.0, 3.0], "float64"),
        (["a", "b", "c"], "str"),
        (["a", "b", "c"], "category"),
        (["2001-01-01", "2001-01-02", "2001-01-03"], "datetime64[ns]"),
    ],
)
def test_convert_dtypes(data, dtype):
    s = pd.Series(data, dtype=dtype)
    gs = cudf.Series(data, dtype=dtype)
    expect = s.convert_dtypes()
    # because we don't have distinct nullable types, we check that we
    # get the same result if we convert to nullable pandas types:
    got = gs.convert_dtypes().to_pandas(nullable=True)
    assert_eq(expect, got)


# Now write the same test, but construct a DataFrame
# as input instead of parametrizing:
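# Illustrative sketch only (an assumption, not part of the original file): one
# way to realize the comment above, constructing a DataFrame that spans the
# same dtypes instead of parametrizing. The test name is hypothetical.
def test_convert_dtypes_dataframe():
    pdf = pd.DataFrame(
        {
            "a": pd.Series([1, 2, 3], dtype="int8"),
            "b": pd.Series([1.0, 2.0, 3.0], dtype="float64"),
            "c": pd.Series(["a", "b", "c"], dtype="str"),
        }
    )
    gdf = cudf.DataFrame.from_pandas(pdf)
    expect = pdf.convert_dtypes()
    # as above, compare against the result converted to nullable pandas types
    got = gdf.convert_dtypes().to_pandas(nullable=True)
    assert_eq(expect, got)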
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_accessors.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_datetimelike.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
import os

import pandas as pd
import pytest

import cudf
from cudf import date_range
from cudf.testing._utils import assert_eq


def _get_all_zones():
    zones = []
    for root, dirs, files in os.walk("/usr/share/zoneinfo"):
        for f in files:
            zone_name = ("/".join([root, f])).lstrip("/usr/share/zoneinfo")
            try:
                _ = pd.DatetimeTZDtype("ns", zone_name)
            except Exception:
                continue
            zones.append(zone_name)
    return zones


# NOTE: ALL_TIME_ZONES is a very large list; we likely do NOT want to
# use it for more than a handful of tests
ALL_TIME_ZONES = _get_all_zones()


@pytest.fixture(params=["ns", "us", "ms", "s"])
def unit(request):
    return request.param


@pytest.fixture(
    params=["America/New_York", "Asia/Tokyo", "CET", "Etc/GMT+1", "UTC"]
)
def tz(request):
    return request.param


@pytest.mark.parametrize("zone_name", ALL_TIME_ZONES)
def test_tz_localize(unit, zone_name):
    s = cudf.Series(date_range("2001-01-01", "2001-01-02", freq="1s"))
    s = s.astype(f"<M8[{unit}]")
    s = s.dt.tz_localize(zone_name)
    assert isinstance(s.dtype, pd.DatetimeTZDtype)
    assert s.dtype.unit == unit
    assert str(s.dtype.tz) == zone_name


@pytest.mark.parametrize("zone_name", ALL_TIME_ZONES)
def test_localize_ambiguous(request, unit, zone_name):
    request.applymarker(
        pytest.mark.xfail(
            condition=(zone_name == "America/Metlakatla"),
            reason="https://www.timeanddate.com/news/time/metlakatla-quits-dst.html",  # noqa: E501
        )
    )
    s = cudf.Series(
        [
            "2018-11-04 00:30:00",
            "2018-11-04 01:00:00",
            "2018-11-04 01:30:00",
            "2018-11-04 02:00:00",
            None,
            "2018-11-04 02:30:00",
        ],
        dtype=f"datetime64[{unit}]",
    )
    expect = s.to_pandas().dt.tz_localize(
        zone_name, ambiguous="NaT", nonexistent="NaT"
    )
    got = s.dt.tz_localize(zone_name)
    assert_eq(expect, got)


@pytest.mark.parametrize("zone_name", ALL_TIME_ZONES)
def test_localize_nonexistent(request, unit, zone_name):
    request.applymarker(
        pytest.mark.xfail(
            condition=(zone_name == "America/Grand_Turk"),
            reason="https://www.worldtimezone.com/dst_news/dst_news_turkscaicos03.html",  # noqa: E501
        )
    )
    s = cudf.Series(
        [
            "2018-03-11 01:30:00",
            "2018-03-11 02:00:00",
            "2018-03-11 02:30:00",
            "2018-03-11 03:00:00",
            None,
            "2018-03-11 03:30:00",
        ],
        dtype=f"datetime64[{unit}]",
    )
    expect = s.to_pandas().dt.tz_localize(
        zone_name, ambiguous="NaT", nonexistent="NaT"
    )
    got = s.dt.tz_localize(zone_name)
    assert_eq(expect, got)


def test_delocalize(unit, tz):
    psr = pd.Series(
        pd.date_range("2001-01-01", "2001-01-02", freq="1s")
    ).astype(f"datetime64[{unit}]")
    sr = cudf.from_pandas(psr)
    expect = psr.dt.tz_localize(tz).dt.tz_localize(None)
    got = sr.dt.tz_localize(tz).dt.tz_localize(None)
    assert_eq(expect, got)


def test_delocalize_naive():
    # delocalizing naive datetimes should be a no-op
    psr = pd.Series(["2001-01-01"], dtype="datetime64[ns]")
    sr = cudf.from_pandas(psr)
    expect = psr.dt.tz_localize(None)
    got = sr.dt.tz_localize(None)
    assert_eq(expect, got)


@pytest.mark.parametrize(
    "from_tz", ["Europe/London", "America/Chicago", "UTC"]
)
@pytest.mark.parametrize(
    "to_tz", ["Europe/London", "America/Chicago", "UTC", None]
)
def test_convert(from_tz, to_tz):
    ps = pd.Series(pd.date_range("2023-01-01", periods=3, freq="H"))
    gs = cudf.from_pandas(ps)
    ps = ps.dt.tz_localize(from_tz)
    gs = gs.dt.tz_localize(from_tz)
    expect = ps.dt.tz_convert(to_tz)
    got = gs.dt.tz_convert(to_tz)
    assert_eq(expect, got)


def test_convert_from_naive():
    gs = cudf.Series(cudf.date_range("2023-01-01", periods=3, freq="H"))
    with pytest.raises(TypeError):
        gs.dt.tz_convert("America/New_York")


@pytest.mark.parametrize(
    "data,original_timezone,target_timezone",
    [
        # DST transition:
        (["2023-03-12 01:30:00"], "America/New_York", "America/Los_Angeles"),
        # crossing the international date line:
        (["2023-05-17 23:30:00"], "Pacific/Auckland", "America/Los_Angeles"),
        # timezone with non-integer offset:
        (["2023-05-17 12:00:00"], "Asia/Kolkata", "Australia/Eucla"),
        # timezone with negative offset:
        (["2023-05-17 09:00:00"], "America/Los_Angeles", "Pacific/Auckland"),
        # conversion across multiple days:
        (["2023-05-16 23:30:00"], "America/New_York", "Asia/Kolkata"),
        # timezone with half-hour offset:
        (["2023-05-17 12:00:00"], "Asia/Kolkata", "Australia/Adelaide"),
        # timezone conversion with a timestamp in the future:
        (["2025-01-01 00:00:00"], "America/New_York", "Europe/London"),
        # timezone conversion with a timestamp in the past:
        (["2000-01-01 12:00:00"], "Europe/Paris", "America/Los_Angeles"),
        # timezone conversion with a timestamp at midnight:
        (["2023-05-17 00:00:00"], "Asia/Tokyo", "Europe/Paris"),
    ],
)
def test_convert_edge_cases(data, original_timezone, target_timezone):
    ps = pd.Series(data, dtype="datetime64[s]").dt.tz_localize(
        original_timezone
    )
    gs = cudf.Series(data, dtype="datetime64[s]").dt.tz_localize(
        original_timezone
    )
    expect = ps.dt.tz_convert(target_timezone)
    got = gs.dt.tz_convert(target_timezone)
    assert_eq(expect, got)


def test_tz_aware_attributes_local():
    data = [
        "2008-05-12 13:50:00",
        "2008-12-12 14:50:35",
        "2009-05-12 13:50:32",
    ]
    dti = cudf.DatetimeIndex(data).tz_localize("UTC").tz_convert("US/Eastern")
    result = dti.hour
    expected = cudf.Index([9, 9, 9], dtype="int16")
    assert_eq(result, expected)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_timeseries.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_computation.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_io_serialization.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_missing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_indexing.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_attributes.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_function_application.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_binary_operations.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_selecting.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_reshaping.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_categorial.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/series/test_combining.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/data
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/data/subword_tokenizer_data/test_sentences.txt
|
This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত
This sample text is public domain and was randomly selected from Project Guttenberg.
The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors.
Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity.
Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them.
"Cass" Beard had risen early that morning, but not with a view to discovery.
A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets.
The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency.
This was nearly opposite.
Mr. Cassius crossed the highway, and stopped suddenly.
But the Goblin could no longer sit quietly listening to the wisdom and intellect downstairs. No, as soon as the light shone in the evening from the attic it seemed to him as though its beams were strong ropes dragging him up, and he had to go and peep through the key-hole. There he felt the sort of feeling we have looking at the great rolling sea in a storm, and he burst into tears. He could not himself say why he wept, but in spite of his tears he felt quite happy. How beautiful it must be to sit under that tree with the student, but that he could not do; he had to content himself with the key-hole and be happy there!
But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring.
Looking at it more attentively, he saw that it bore the inscription, "May to Cass."
Like most of his fellow gold-seekers, Cass was superstitious.
The fountain of classic wisdom, Hypatia herself.
As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge.
From my youth I felt in me a soul above the matter-entangled herd.
She revealed to me the glorious fact, that I am a spark of Divinity itself.
A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's.
There is a philosophic pleasure in opening one's treasures to the modest young.
Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street.
Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide;
but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind.
Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now.
His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert;
while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts.
At last they reached the quay at the opposite end of the street;
and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers.
He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
Nana also troubled him in another way. He had sometimes a feeling that she did not admire him. “I know she admires you tremendously, George,”
Mrs. Darling would assure him, and then she would sign to the children to be specially nice to father. Lovely dances followed, in which the only other servant, Liza, was sometimes allowed to join.
Such a midget she looked in her long skirt and maid's cap, though she had sworn, when engaged, that she would never see ten again.
The gaiety of those romps!
And gayest of all was Mrs. Darling, who would pirouette so wildly that all you could see of her was the kiss, and then if you had dashed at her you might have got it.
There never was a simpler happier family until the coming of Peter Pan.
Finally, I always go to sea as a sailor, because of the wholesome exercise and pure air of the fore-castle deck.
For as in this world, head winds are far more prevalent than winds from astern (that is, if you never violate the Pythagorean maxim), so for the most part the Commodore on the quarter-deck gets his atmosphere at second hand from the sailors on the forecastle. He thinks he breathes it first; but not so.
In much the same way do the commonalty lead their leaders in many other things, at the same time that the leaders little suspect it.
But wherefore it was that after having repeatedly smelt the sea as a merchant sailor, I should now take it into my head to go on a whaling voyage; this the invisible police officer of the Fates, who has the constant surveillance of me, and secretly dogs me, and influences me in some unaccountable way—he can better answer than any one else.
And, doubtless, my going on this whaling voyage, formed part of the grand programme of Providence that was drawn up a long time ago.
It came in as a sort of brief interlude and solo between more extensive performances.
I take it that this part of the bill must have run something like this:
“_Grand Contested Election for the Presidency of the United States._
“WHALING VOYAGE BY ONE ISHMAEL. “BLOODY BATTLE IN AFFGHANISTAN.”
Amy followed, but she poked her hands out stiffly before her, and jerked herself along as if she went by machinery, and her "Ow!" was more suggestive of pins being run into her than of fear and anguish.
Jo gave a despairing groan, and Meg laughed outright, while Beth let her bread burn as she watched the fun with interest.
"It's no use! Do the best you can when the time comes, and if the audience laughs, don't blame me. Come on, Meg."
Then things went smoothly, for Don Pedro defied the world in a speech of two pages without a single break. Hagar, the witch, chanted an awful incantation over her kettleful of simmering toads, with weird effect.
Roderigo rent his chains asunder manfully, and Hugo died in agonies of remorse and arsenic, with a wild, "Ha! Ha!"
This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত
This sample text is public domain and was randomly selected from Project Guttenberg.
The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors.
Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity.
Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them.
"Cass" Beard had risen early that morning, but not with a view to discovery.
A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets.
The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency.
This was nearly opposite.
Mr. Cassius crossed the highway, and stopped suddenly.
Something glittered in the nearest red pool before him.
I had one experience with Master Philip before our visitors betook themselves back to Kent, which, unfortunate as it was, I cannot but relate here. My cousin would enter into none of those rough amusements in which I passed my time, for fear, I took it, of spoiling his fine broadcloths or of losing a gold buckle. He never could be got to wrestle, though I challenged him more than once. And he was a well-built lad, and might, with a little practice, have become skilled in that sport. He laughed at the homespun I wore about the farm, saying it was no costume for a gentleman's son, and begged me sneeringly to don leather breeches. He would have none of the company of those lads with whom I found pleasure, young Harvey, and Willis's son, who was being trained as Mr. Starkie's assistant. Nor indeed did I disdain to join in a game with Hugo, who had been given to me, and other negro lads. Philip saw no sport in a wrestle or a fight between two of the boys from the quarters, and marvelled that I could lower myself to bet with Harvey the younger. He took not a spark of interest in the gaming cocks we raised together to compete at the local contests and at the fair, and knew not a gaff from a cockspur. Being one day at my wits' end to amuse my cousin, I proposed to him a game of quoits on the green beside the spring-house, and thither we repaired, followed by Hugo, and young Harvey come to look on. Master Philip, not casting as well as he might, cries out suddenly to Hugo: "Begone, you black dog! What business have you here watching a game between gentlemen?"
But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring.
Looking at it more attentively, he saw that it bore the inscription, "May to Cass."
Like most of his fellow gold-seekers, Cass was superstitious.
The fountain of classic wisdom, Hypatia herself.
As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge.
From my youth I felt in me a soul above the matter-entangled herd.
She revealed to me the glorious fact, that I am a spark of Divinity itself.
A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's.
There is a philosophic pleasure in opening one's treasures to the modest young.
Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street.
Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide;
but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind.
Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now.
His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert;
while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts.
and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers.
He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
Nana also troubled him in another way. He had sometimes a feeling that she did not admire him. “I know she admires you tremendously, George,”
Mrs. Darling would assure him, and then she would sign to the children to be specially nice to father. Lovely dances followed, in which the only other servant, Liza, was sometimes allowed to join.
Such a midget she looked in her long skirt and maid's cap, though she had sworn, when engaged, that she would never see ten again.
In the Year 1676, the Prince of _Orange_ having, in concert with the _Spaniards_, resolv'd upon the important Siege of _Maestrich_ (the only Town in the _Dutch_ Provinces, then remaining in the Hands of the _French_) it was accordingly invested about the middle of _June_, with an Army of twenty Thousand Men, under the Command of his Highness Prince
_Waldeck_, with the grand Army covering the Siege. It was some Time before the heavy Cannon, which we expected up the _Maes_, from _Holland_, arrived; which gave Occasion to a Piece of Raillery of Monsieur _Calvo_, the Governor, which was as handsomely repartec'd. That Governor, by a Messenger, intimating his Sorrow to find, we had pawn'd our Cannon for Ammunition Bread. Answer was made, That in a few Days we hoped to give him a Taste of the Loaves, which he should find would be sent him into the Town in extraordinary plenty. I remember another Piece of Raillery, which pass'd some Days after between the _Rhingrave_ and the same _Calvo_. The former sending Word, that he hoped within three Weeks to salute that Governor's Mistress within the Place. _Calvo_ reply'd, He'd give him leave to kiss her all over, if he kiss'd her anywhere in three Months.
And gayest of all was Mrs. Darling, who would pirouette so wildly that all you could see of her was the kiss, and then if you had dashed at her you might have got it.
There never was a simpler happier family until the coming of Peter Pan.
Finally, I always go to sea as a sailor, because of the wholesome exercise and pure air of the fore-castle deck.
For as in this world, head winds are far more prevalent than winds from astern (that is, if you never violate the Pythagorean maxim), so for the most part the Commodore on the quarter-deck gets his atmosphere at second hand from the sailors on the forecastle. He thinks he breathes it first; but not so.
In much the same way do the commonalty lead their leaders in many other things, at the same time that the leaders little suspect it.
But wherefore it was that after having repeatedly smelt the sea as a merchant sailor, I should now take it into my head to go on a whaling voyage; this the invisible police officer of the Fates, who has the constant surveillance of me, and secretly dogs me, and influences me in some unaccountable way—he can better answer than any one else.
And, doubtless, my going on this whaling voyage, formed part of the grand programme of Providence that was drawn up a long time ago.
It came in as a sort of brief interlude and solo between more extensive performances.
The British Isles have been ringing for the last few years with the word 'Art' in its German sense; with 'High Art,' 'Symbolic Art,' 'Ecclesiastical Art,' 'Dramatic Art,' 'Tragic Art,' and so forth; and every well-educated person is expected, nowadays, to know something about Art. Yet in spite of all translations of German 'AEsthetic' treatises, and 'Kunstnovellen,' the mass of the British people cares very little about the matter, and sits contented under the imputation of 'bad taste.' Our stage, long since dead, does not revive; our poetry is dying; our music, like our architecture, only reproduces the past; our painting is only first-rate when it handles landscapes and animals, and seems likely so to remain; but, meanwhile, nobody cares. Some of the deepest and most earnest minds vote the question, in general, a 'sham and a snare,' and whisper to each other
confidentially, that Gothic art is beginning to be a 'bore,' and that Sir Christopher Wren was a very good fellow after all; while the middle classes look on the Art movement half amused, as with a pretty toy, half sulkily suspicious of Popery and Paganism, and think,
apparently, that Art is very well when it means nothing, and is merely used to beautify drawing-rooms and shawl patterns; not to mention that, if there were no painters, Mr. Smith could not hand down to posterity likenesses of himself, Mrs. Smith, and family. But
when 'Art' dares to be in earnest, and to mean something, much more to connect itself with religion, Smith's tone alters. He will teach 'Art' to keep in what he considers its place, and if it refuses, take the law of it, and put it into the Ecclesiastical Court. So he says, and what is more, he means what he says; and as all the world, from Hindostan to Canada, knows by most practical proof, what he means, he sooner or later does, perhaps not always in the wisest way, but still he does it.Ah! It's pleasant to drop into my own easy-chair my dear though a little palpitating what with trotting up-stairs and what with trotting down, and why kitchen stairs should all be corner stairs is for the builders to justify though I do not think they fully understand their trade and never did, else why the sameness and why not more conveniences and fewer
draughts and likewise making a practice of laying the plaster on too thick I am well convinced which holds the damp, and as to chimney-pots putting them on by guess-work like hats at a party and no more knowing what their effect will be upon the smoke bless you than I do if so much, except that it will mostly be either to send it down your throat in a straight form or give it a twist before it goes there. And what I says speaking as I find of those new metal chimneys all manner of shapes (there's a row of 'em at Miss Wozenham's lodging-house lower down on the other side of the way) is that they only work your smoke into artificial patterns for you before you swallow it and that I'd quite as soon swallow mine plain, the flavour being the same, not to mention the conceit of putting up signs on the top of your house to show the forms in which you take your smoke into your inside
Amy followed, but she poked her hands out stiffly before her, and jerked herself along as if she went by machinery, and her "Ow!" was more suggestive of pins being run into her than of fear and anguish.
Jo gave a despairing groan, and Meg laughed outright, while Beth let her bread burn as she watched the fun with interest.
"It's no use! Do the best you can when the time comes, and if the audience laughs, don't blame me. Come on, Meg."
Then things went smoothly, for Don Pedro defied the world in a speech of two pages without a single break. Hagar, the witch, chanted an awful incantation over her kettleful of simmering toads, with weird effect.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/data/subword_tokenizer_data
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/data/subword_tokenizer_data/bert_base_cased_sampled/vocab.txt
|
[PAD]
[unused1]
[unused2]
[unused3]
[unused4]
[unused5]
[unused6]
[unused7]
[unused8]
[unused9]
[unused10]
[unused11]
[unused12]
[unused13]
[unused14]
[unused15]
[unused16]
[unused17]
[unused18]
[unused19]
[unused20]
[unused21]
[unused22]
[unused23]
[unused24]
[unused25]
[unused26]
[unused27]
[unused28]
[unused29]
[unused30]
[unused31]
[unused32]
[unused33]
[unused34]
[unused35]
[unused36]
[unused37]
[unused38]
[unused39]
[unused40]
[unused41]
[unused42]
[unused43]
[unused44]
[unused45]
[unused46]
[unused47]
[unused48]
[unused49]
[unused50]
[unused51]
[unused52]
[unused53]
[unused54]
[unused55]
[unused56]
[unused57]
[unused58]
[unused59]
[unused60]
[unused61]
[unused62]
[unused63]
[unused64]
[unused65]
[unused66]
[unused67]
[unused68]
[unused69]
[unused70]
[unused71]
[unused72]
[unused73]
[unused74]
[unused75]
[unused76]
[unused77]
[unused78]
[unused79]
[unused80]
[unused81]
[unused82]
[unused83]
[unused84]
[unused85]
[unused86]
[unused87]
[unused88]
[unused89]
[unused90]
[unused91]
[unused92]
[unused93]
[unused94]
[unused95]
[unused96]
[unused97]
[unused98]
[unused99]
[UNK]
[CLS]
[SEP]
[MASK]
[unused100]
[unused101]
!
"
#
$
%
&
'
(
)
*
+
,
-
.
/
0
1
2
3
4
5
6
7
8
9
:
;
<
=
>
?
@
A
B
C
D
E
F
G
H
I
J
K
L
M
N
O
P
Q
R
S
T
U
V
W
X
Y
Z
[
\
]
^
_
`
a
b
c
d
e
f
g
h
i
j
k
l
m
n
o
p
q
r
s
t
u
v
w
x
y
z
{
|
}
~
¡
¢
£
¥
§
¨
©
ª
«
¬
®
°
±
²
³
´
µ
¶
·
¹
º
»
¼
½
¾
¿
À
Á
Â
Ä
Å
Æ
Ç
È
É
Í
Î
Ñ
Ó
Ö
×
Ø
Ú
Ü
Þ
ß
à
á
â
ã
ä
å
æ
ç
è
é
ê
ë
ì
í
î
ï
ð
ñ
ò
ó
ô
õ
ö
÷
ø
ù
ú
û
ü
ý
þ
ÿ
Ā
ā
ă
ą
Ć
ć
Č
č
ď
Đ
đ
ē
ė
ę
ě
ğ
ġ
Ħ
ħ
ĩ
Ī
ī
İ
ı
ļ
Ľ
ľ
Ł
ł
ń
ņ
ň
ŋ
Ō
ō
ŏ
ő
Œ
œ
ř
Ś
ś
Ş
ş
Š
š
Ţ
ţ
ť
ũ
ū
ŭ
ů
ű
ų
ŵ
ŷ
ź
Ż
ż
Ž
ž
Ə
ƒ
ơ
ư
ǎ
ǐ
ǒ
ǔ
ǫ
Ș
ș
Ț
ț
ɐ
ɑ
ɔ
ɕ
ə
ɛ
ɡ
ɣ
ɨ
ɪ
ɲ
ɾ
ʀ
ʁ
ʂ
ʃ
ʊ
ʋ
ʌ
ʐ
ʑ
ʒ
ʔ
ʰ
ʲ
ʳ
ʷ
ʻ
ʼ
ʾ
ʿ
ˈ
ː
ˡ
ˢ
ˣ
́
̃
̍
̯
͡
Α
Β
Γ
Δ
Ε
Η
Θ
Ι
Κ
Λ
Μ
Ν
Ο
Π
Σ
Τ
Φ
Χ
Ψ
Ω
ά
έ
ή
ί
α
β
γ
δ
ε
ζ
η
θ
ι
κ
λ
μ
ν
ξ
ο
π
ρ
ς
σ
τ
υ
φ
χ
ψ
ω
ό
ύ
ώ
І
Ј
А
Б
В
Г
Д
Е
Ж
З
И
К
Л
М
Н
О
П
Р
С
Т
У
Ф
Х
Ц
Ч
Ш
Э
Ю
Я
а
б
в
г
д
е
ж
з
и
й
к
л
м
н
о
п
р
с
т
у
ф
х
ц
ч
ш
щ
ъ
ы
ь
э
ю
я
ё
і
ї
ј
њ
ћ
Ա
Հ
ա
ե
ի
կ
մ
յ
ն
ո
ս
տ
ր
ւ
ְ
ִ
ֵ
ֶ
ַ
ָ
ֹ
ּ
א
ב
ג
ד
ה
ו
ז
ח
ט
י
כ
ל
ם
מ
ן
נ
ס
ע
פ
צ
ק
ר
ש
ת
،
ء
آ
أ
إ
ئ
ا
ب
ة
ت
ث
ج
ح
خ
د
ذ
ر
ز
س
ش
ص
ض
ط
ظ
ع
غ
ف
ق
ك
ل
م
ن
ه
و
ى
ي
َ
ِ
ٹ
پ
چ
ک
گ
ہ
ی
ے
ं
आ
क
ग
च
ज
ण
त
द
ध
न
प
ब
भ
म
य
र
ल
व
श
ष
स
ह
ा
ि
ी
ु
े
ो
्
।
॥
আ
ই
এ
ও
ক
খ
গ
চ
ছ
জ
ট
ত
থ
দ
ধ
ন
প
ব
ম
য
র
ল
শ
স
হ
়
া
ি
ী
ু
ে
ো
্
য়
க
த
ப
ம
ய
ர
ல
வ
ா
ி
ு
்
ร
་
ག
ང
ད
ན
བ
མ
ར
ལ
ས
ི
ུ
ེ
ོ
ა
ე
ი
ლ
ნ
ო
რ
ს
ᴬ
ᴵ
ᵀ
ᵃ
ᵇ
ᵈ
ᵉ
ᵍ
ᵏ
ᵐ
ᵒ
ᵖ
ᵗ
ᵘ
ᵢ
ᵣ
ᵤ
ᵥ
ᶜ
ᶠ
ḍ
Ḥ
ḥ
Ḩ
ḩ
ḳ
ṃ
ṅ
ṇ
ṛ
ṣ
ṭ
ạ
ả
ấ
ầ
ẩ
ậ
ắ
ế
ề
ể
ễ
ệ
ị
ọ
ố
ồ
ổ
ộ
ớ
ờ
ợ
ụ
ủ
ứ
ừ
ử
ữ
ự
ỳ
ỹ
ἀ
ἐ
ὁ
ὐ
ὰ
ὶ
ὸ
ῆ
ῖ
ῦ
ῶ
‐
‑
‒
–
—
―
‖
‘
’
‚
“
”
„
†
‡
•
…
‰
′
″
⁄
⁰
ⁱ
⁴
⁵
⁶
⁷
⁸
⁹
⁺
⁻
ⁿ
₀
₁
₂
₃
₄
₅
₆
₇
₈
₉
₊
₍
₎
ₐ
ₑ
ₒ
ₓ
ₕ
ₖ
ₘ
ₙ
ₚ
ₛ
ₜ
₤
€
₱
₹
ℓ
№
ℝ
⅓
←
↑
→
↔
⇌
⇒
∂
∈
−
∗
∘
√
∞
∧
∨
∩
∪
≈
≠
≡
≤
≥
⊂
⊆
⊕
⋅
─
│
■
●
★
☆
☉
♠
♣
♥
♦
♭
♯
⟨
⟩
ⱼ
、
。
《
》
「
」
『
』
〜
い
う
え
お
か
き
く
け
こ
さ
し
す
せ
そ
た
ち
つ
て
と
な
に
の
は
ひ
ま
み
む
め
も
や
ゆ
よ
ら
り
る
れ
ん
ア
ィ
イ
ウ
エ
オ
カ
ガ
キ
ク
グ
コ
サ
シ
ジ
ス
ズ
タ
ダ
ッ
テ
デ
ト
ド
ナ
ニ
ハ
バ
パ
フ
ブ
プ
マ
ミ
ム
ャ
ュ
ラ
リ
ル
レ
ロ
ン
・
ー
一
三
上
下
中
事
二
井
京
人
亻
仁
佐
侍
光
公
力
北
十
南
原
口
史
司
吉
同
和
囗
国
國
土
城
士
大
天
太
夫
女
子
宀
安
宮
宿
小
尚
山
島
川
州
平
年
心
愛
戸
文
新
方
日
明
星
書
月
木
本
李
村
東
松
林
正
武
氏
水
氵
江
河
海
版
犬
王
生
田
白
皇
省
真
石
社
神
竹
美
義
花
藤
西
谷
車
辶
道
郎
郡
部
野
金
長
門
陽
青
食
馬
高
龍
龸
사
씨
의
이
한
fi
fl
!
(
)
,
-
/
:
the
of
and
to
in
was
The
is
for
as
on
with
that
##s
his
by
he
at
from
it
her
He
had
an
were
you
be
In
she
are
but
which
It
not
or
have
my
him
one
this
me
has
also
up
their
first
out
who
been
they
She
into
all
would
its
##ing
time
two
##a
##e
said
about
when
over
more
other
can
after
back
them
then
##ed
there
like
so
only
##n
could
##d
##i
##y
what
no
##o
where
This
made
than
if
You
##ly
through
we
before
##r
just
some
##er
years
do
New
##t
down
between
new
now
will
three
most
On
around
year
used
such
being
well
during
They
know
against
under
later
did
part
known
off
while
His
re
...
##l
people
until
way
American
didn
University
your
both
many
get
United
became
head
There
second
As
work
any
But
still
again
born
even
eyes
After
including
de
took
And
long
team
season
family
see
right
same
called
name
because
film
don
10
found
much
school
##es
going
won
place
away
We
day
left
John
000
hand
since
World
these
how
make
number
each
life
area
man
four
go
No
here
very
National
##m
played
released
never
began
States
album
home
last
too
held
several
May
own
##on
take
end
School
##h
ll
series
What
want
use
another
city
When
2010
side
At
may
That
came
face
June
think
game
those
high
March
early
September
##al
2011
looked
July
state
small
thought
went
January
October
##u
based
August
##us
world
good
April
York
us
12
2012
2008
For
2009
group
along
few
South
little
##k
following
November
something
2013
December
set
2007
old
2006
2014
located
##an
music
County
City
former
##in
room
ve
next
All
##man
got
father
house
##g
body
15
20
18
started
If
2015
town
our
line
War
large
population
named
British
company
member
five
My
single
##en
age
State
moved
February
11
Her
should
century
government
built
come
best
show
However
within
look
men
door
without
need
wasn
2016
water
One
system
knew
every
died
League
turned
asked
North
St
wanted
building
received
song
served
though
felt
##ia
station
band
##ers
local
public
himself
different
death
say
##1
30
##2
2005
16
night
behind
children
English
members
near
saw
together
son
14
voice
village
13
hands
help
##3
due
French
London
top
told
open
published
third
2017
play
across
During
put
final
often
include
25
##le
main
having
2004
once
ever
let
book
led
gave
late
front
find
club
##4
German
included
species
College
form
opened
mother
women
enough
West
must
2000
power
really
17
making
half
##6
order
might
##is
given
million
times
days
point
full
service
With
km
major
##7
original
become
seen
II
north
six
##te
love
##0
national
International
##5
24
So
District
lost
run
couldn
career
always
##9
2003
##th
country
##z
House
air
tell
south
worked
woman
player
##A
almost
war
River
##ic
married
continued
Then
James
close
black
short
##8
##na
using
history
returned
light
car
##ra
sure
William
things
General
##ry
2002
better
support
100
among
From
feet
King
anything
21
19
established
district
2001
feel
great
##ton
level
Cup
These
written
games
others
already
title
story
##p
law
thing
US
record
role
however
By
students
England
white
control
least
inside
land
##C
22
give
community
hard
##ie
non
##c
produced
George
round
period
Park
business
various
##ne
does
present
wife
far
taken
per
reached
David
able
version
working
young
live
created
joined
East
living
appeared
case
High
done
23
important
President
Award
France
position
office
looking
total
general
class
To
production
##S
football
party
brother
keep
mind
free
Street
hair
announced
development
either
nothing
moment
Church
followed
wrote
why
India
San
election
1999
lead
How
##ch
##rs
words
European
course
considered
America
arms
Army
political
##la
28
26
west
east
ground
further
church
less
site
First
Not
Australia
toward
California
##ness
described
works
An
Council
heart
past
military
27
##or
heard
field
human
soon
founded
1998
playing
trying
##x
##ist
##ta
television
mouth
although
taking
win
fire
Division
##ity
Party
Royal
program
Some
Don
Association
According
tried
TV
Paul
outside
daughter
Best
While
someone
match
recorded
Canada
closed
region
Air
above
months
elected
##da
##ian
road
##ar
brought
move
1997
leave
##um
Thomas
1996
am
low
Robert
formed
person
services
points
Mr
miles
##b
stop
rest
doing
needed
international
release
floor
start
sound
call
killed
real
dark
research
finished
language
Michael
professional
change
sent
50
upon
29
track
hit
event
2018
term
example
Germany
similar
return
##ism
fact
pulled
stood
says
ran
information
yet
result
developed
girl
##re
God
1995
areas
signed
decided
##ment
Company
seemed
##el
co
turn
race
common
video
Charles
Indian
##ation
blood
art
red
##able
added
rather
1994
met
director
addition
design
average
minutes
##ies
##ted
available
bed
coming
friend
idea
kind
Union
Road
remained
##ting
everything
##ma
running
care
finally
Chinese
appointed
1992
Australian
##ley
popular
mean
teams
probably
##land
usually
project
social
Championship
possible
word
Russian
instead
mi
herself
##T
Peter
Hall
Center
seat
style
money
1993
else
Department
table
Music
current
31
features
special
events
character
Two
square
sold
debut
##v
process
Although
Since
##ka
40
Central
currently
education
placed
lot
China
quickly
forward
seven
##ling
Europe
arm
performed
Japanese
1991
Henry
Now
Dr
##ion
week
Group
myself
big
UK
Washington
ten
deep
1990
Club
Japan
space
La
directed
smile
episode
hours
whole
##de
##less
Why
wouldn
designed
strong
training
changed
Society
stage
involved
hadn
towards
leading
police
eight
kept
Institute
study
largest
child
eventually
private
modern
Court
throughout
getting
originally
attack
##E
talk
Great
longer
songs
alone
##ine
wide
dead
walked
shot
##ri
Oh
force
##st
Art
today
friends
Island
Richard
1989
center
construction
believe
size
White
ship
completed
##B
gone
Just
rock
sat
##R
radio
below
entire
families
league
includes
type
lived
official
range
hold
featured
Most
##ter
president
passed
means
##f
forces
lips
Mary
Do
guitar
##ce
food
wall
Of
spent
Its
performance
hear
##P
Western
reported
sister
##et
morning
##M
especially
##ive
Minister
itself
post
bit
groups
1988
##tion
Black
##ng
Well
raised
sometimes
Canadian
Paris
Spanish
replaced
schools
Academy
leaving
central
female
Christian
Jack
whose
college
onto
provided
##D
##ville
players
actually
stopped
##son
Museum
doesn
##ts
books
fight
allowed
##ur
beginning
Records
awarded
parents
coach
##os
Red
saying
##ck
Smith
Yes
Lake
##L
aircraft
1987
##ble
previous
ft
action
Italian
African
happened
vocals
Act
future
court
##ge
1986
degree
phone
##ro
Is
countries
winning
breath
Love
river
matter
Lord
Other
list
self
parts
##ate
provide
cut
shows
plan
1st
interest
##ized
Africa
stated
Sir
fell
owned
earlier
ended
competition
attention
1985
lower
nearly
bad
older
stay
Saint
##se
certain
1984
fingers
blue
try
fourth
Grand
##as
king
##nt
makes
chest
movement
states
moving
data
introduced
model
date
section
Los
deal
##I
skin
entered
middle
success
Texas
##w
summer
island
##N
Republic
length
husband
1980
##ey
reason
anyone
forced
via
base
500
job
covered
Festival
Roman
successful
rights
cover
Man
writing
Ireland
##F
related
goal
takes
buildings
true
weeks
1983
Because
opening
novel
ISBN
meet
gold
##ous
mid
km²
standing
Football
Chicago
shook
whom
##ki
1982
Day
feeling
scored
boy
higher
Force
leader
heavy
fall
question
sense
army
Second
energy
meeting
themselves
kill
##am
board
census
##ya
##ns
mine
meant
market
required
battle
campaign
attended
approximately
Kingdom
runs
active
##ha
contract
clear
previously
health
1979
Arts
complete
Catholic
couple
units
##ll
##ty
Committee
shoulder
sea
systems
listed
##O
caught
tournament
##G
northern
author
Film
Your
##men
holding
offered
personal
1981
southern
artist
traditional
studio
200
capital
##ful
regular
ask
giving
organization
month
news
Are
read
managed
helped
studied
student
defeated
natural
industry
Year
noted
decision
Government
quite
##id
smiled
1972
Maybe
tracks
##ke
Mark
al
media
engine
hour
Their
relationship
plays
property
structure
1976
ago
Hill
Martin
1978
ready
Many
Like
Bay
immediately
generally
Italy
Greek
practice
caused
division
significant
Joseph
speed
Let
thinking
completely
1974
primary
mostly
##field
##K
1975
##to
Even
writer
##led
dropped
magazine
collection
understand
route
highest
particular
films
lines
network
Science
loss
carried
direction
green
1977
location
producer
according
Women
Queen
neck
thus
independent
view
1970
Angeles
Soviet
distance
problem
Board
tour
western
income
appearance
access
Mexico
nodded
street
surface
arrived
believed
Old
1968
1973
becoming
whether
1945
figure
singer
stand
Following
issue
window
wrong
pain
everyone
lives
issues
park
slowly
la
act
##va
bring
Lee
operations
key
comes
fine
cold
famous
Navy
1971
Me
additional
individual
##ner
Zealand
goals
county
contains
Service
minute
2nd
reach
talking
particularly
##ham
movie
Director
glass
paper
studies
##co
railway
standard
Education
45
represented
Chief
Louis
launched
Star
terms
60
1969
experience
watched
Another
Press
Tom
staff
starting
subject
break
Virginia
nine
eye
##age
evidence
foot
##est
companies
Prince
##V
gun
create
Big
People
guy
Green
simply
numerous
##line
increased
twenty
##ga
##do
1967
award
officer
stone
Before
material
Northern
grew
male
plant
Life
legs
step
Al
unit
35
except
answer
##U
report
response
Edward
commercial
edition
trade
science
##ca
Irish
Law
shown
rate
failed
##ni
remains
changes
mm
limited
larger
Later
cause
waiting
Time
##wood
cost
Bill
manager
activities
likely
allow
operated
retired
##ping
65
directly
Who
associated
effect
hell
Florida
straight
hot
Valley
management
girls
expected
eastern
Mike
chance
cast
centre
chair
hurt
problems
##li
walk
programs
Team
characters
Battle
edge
pay
maybe
corner
majority
medical
Joe
Summer
##io
attempt
Pacific
command
Radio
##by
names
municipality
1964
train
economic
Brown
feature
sex
source
agreed
remember
Three
1966
1965
Pennsylvania
victory
senior
annual
III
Southern
results
Sam
serving
religious
Jones
appears
##der
despite
claimed
Both
musical
matches
fast
security
selected
Young
double
complex
hospital
chief
Times
##ve
Championships
filled
Public
Despite
beautiful
Research
plans
Province
##ally
Wales
##ko
artists
metal
nearby
Spain
##il
32
houses
supported
piece
##no
stared
recording
nature
legal
Russia
##ization
remaining
looks
##sh
bridge
closer
cases
scene
marriage
Little
##é
uses
Earth
specific
Frank
theory
Good
discovered
referred
bass
culture
university
presented
Congress
##go
metres
continue
1960
isn
Awards
meaning
cell
composed
separate
Series
forms
Blue
cross
##tor
increase
test
computer
slightly
Where
Jewish
Town
tree
status
1944
variety
responsible
pretty
initially
##way
realized
pass
provides
Captain
Alexander
recent
score
broke
Scott
drive
financial
showed
Line
stories
ordered
soldiers
genus
operation
gaze
sitting
society
Only
hope
actor
follow
Empire
Yeah
technology
happy
focus
policy
spread
situation
##ford
##ba
Mrs
watch
Can
1963
Commission
touch
earned
troops
Under
1962
individuals
cannot
19th
##lin
mile
expression
exactly
suddenly
weight
dance
stepped
places
appear
difficult
Railway
anti
numbers
kilometres
star
##ier
department
ice
Britain
removed
Once
##lo
Boston
value
##ant
mission
trees
Order
sports
join
serve
Major
poor
Poland
mainly
Theatre
pushed
Station
##it
Lady
federal
silver
##ler
foreign
##ard
Eastern
##den
box
hall
subsequently
lies
acquired
1942
ancient
CD
History
Jean
beyond
##ger
El
##les
growing
championship
native
Parliament
Williams
watching
direct
overall
offer
Also
80
Secretary
spoke
Latin
ability
##ated
safe
presence
##ial
headed
regional
planned
1961
Johnson
throat
consists
##W
extended
Or
bar
walls
Chris
stations
politician
Olympics
influence
share
fighting
speak
hundred
Carolina
die
stars
##tic
color
Chapter
##ish
fear
sleep
goes
Francisco
oil
Bank
sign
physical
##berg
Dutch
seasons
##rd
Games
Governor
sorry
lack
Centre
memory
baby
smaller
charge
Did
multiple
ships
shirt
Assembly
amount
leaves
3rd
Foundation
conditions
1943
Rock
Democratic
Daniel
##at
winner
products
##ina
store
latter
Professor
civil
prior
host
1956
soft
vote
needs
Each
rules
1958
pressure
letter
normal
proposed
levels
records
1959
paid
intended
Victoria
purpose
okay
historical
issued
1980s
broadcast
rule
simple
picked
firm
Sea
1941
Elizabeth
1940
serious
featuring
highly
graduated
mentioned
choice
1948
replied
percent
Scotland
##hi
females
constructed
1957
settled
Steve
recognized
cities
crew
glanced
kiss
competed
flight
knowledge
editor
More
Conference
##H
fifth
elements
##ee
##tes
function
newspaper
recently
Miss
cultural
brown
twice
Office
1939
truth
Creek
1946
households
USA
1950
quality
##tt
border
seconds
destroyed
pre
wait
ahead
build
image
90
cars
##mi
33
promoted
professor
et
bank
medal
text
broken
Middle
revealed
sides
wing
seems
channel
1970s
Ben
loved
effort
officers
Will
##ff
70
Israel
Jim
upper
fully
label
Jr
assistant
powerful
pair
positive
##ary
gives
1955
20th
races
remain
kitchen
primarily
##ti
Sydney
easy
Tour
whispered
buried
300
News
Polish
1952
Duke
Columbia
produce
accepted
00
approach
minor
1947
Special
44
Asian
basis
visit
Fort
Civil
finish
formerly
beside
leaned
##ite
median
rose
coast
effects
supposed
Cross
##hip
Corps
residents
Jackson
##ir
Bob
basketball
36
Asia
seem
Bishop
Book
##ber
ring
##ze
owner
BBC
##ja
transferred
acting
De
appearances
walking
Le
press
grabbed
1954
officially
1953
##pe
risk
taught
review
##X
lay
##well
council
Avenue
seeing
losing
Ohio
Super
province
ones
travel
##sa
projects
equipment
spot
Berlin
administrative
heat
potential
shut
capacity
elections
growth
fought
Republican
mixed
Andrew
teacher
turning
strength
shoulders
beat
wind
1949
Health
follows
camp
suggested
perhaps
Alex
mountain
contact
divided
candidate
fellow
34
Show
necessary
workers
ball
horse
ways
questions
protect
gas
activity
younger
bottom
founder
Scottish
screen
treatment
easily
com
##house
dedicated
Master
warm
Night
Georgia
Long
von
##me
perfect
website
1960s
piano
efforts
##ide
Tony
sort
offers
Development
Simon
executive
##nd
save
Over
Senate
1951
1990s
draw
master
Police
##ius
renamed
boys
initial
prominent
damage
Co
##ov
##za
online
begin
occurred
captured
youth
Top
account
tells
Justice
conducted
forest
##town
bought
teeth
Jersey
##di
purchased
agreement
Michigan
##ure
campus
prison
becomes
product
secret
guess
Route
huge
types
drums
64
split
defeat
estate
housing
##ot
brothers
Coast
declared
happen
titled
therefore
sun
commonly
alongside
Stadium
library
Home
article
steps
telling
slow
assigned
refused
laughed
wants
Nick
wearing
Rome
Open
##ah
Hospital
pointed
Taylor
lifted
escape
participated
##j
drama
parish
Santa
##per
organized
mass
pick
Airport
gets
Library
unable
pull
Live
##ging
surrounding
##ries
focused
Adam
facilities
##ning
##ny
38
##ring
notable
era
connected
gained
operating
laid
Regiment
branch
defined
Christmas
machine
Four
academic
Iran
adopted
concept
Men
compared
search
traffic
Max
Maria
greater
##ding
widely
##burg
serves
1938
37
Go
hotel
shared
typically
scale
1936
leg
suffered
yards
pieces
Ministry
Wilson
episodes
empty
1918
safety
continues
yellow
historic
settlement
400
Come
Corporation
enemy
content
picture
evening
territory
method
trial
solo
driver
Here
##ls
entrance
Prize
spring
whatever
##ent
75
##ji
reading
Arthur
##cy
Our
clothes
Prime
Illinois
Kong
code
##ria
sit
Harry
Federal
chosen
administration
bodies
begins
stomach
Though
seats
Hong
density
Sun
leaders
Field
museum
chart
platform
languages
##ron
birth
holds
Gold
##un
fish
combined
##ps
4th
1937
largely
captain
trust
Game
van
boat
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/data/subword_tokenizer_data
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/data/subword_tokenizer_data/bert_base_cased_sampled/vocab-hash.txt
|
26899
27424
875
7428432802425011718 0
5054974408289448963 6
18358444369622338053 9
5716902217424485892 14
8236612966193239043 18
15282833726017872390 21
15533348956988973570 27
9001315167781089284 29
7621090240282984451 33
15337888141402371590 36
16169070283077377537 42
15615300272936709634 43
12338784885023498756 45
3175624061711419395 49
9436392785812228615 52
12978641027296058883 59
14468815760709033991 62
15607694490571932163 69
53295083356623878 72
0 78
2230148770582976004 78
6120456721458209796 82
15411373208619074054 86
10274574020114097153 92
9000294930530661890 93
13031557903172483076 95
11350066664294002181 99
6325605033787362307 104
2909954277284188676 107
4104562716099355138 111
3267092979937387012 113
17525453481571210244 117
11532627846208440834 121
10784672185103672321 123
11229796758348255749 124
4379577250247562242 129
1041161126836283908 131
3854383966527313413 135
16467720483237810694 140
14820844471735454722 146
13111220924289178119 148
2548683052821249538 155
719749806464434178 157
2121722119826170883 159
9005614210949580292 162
7050169108294333445 166
17351764915062575107 171
14644698505496219141 174
11657834349296686081 179
13626797927783164930 180
14735048589438940164 182
1078491261937017863 186
7952761372439242754 193
7692446865301965827 195
4552111108816020995 198
12455022990418032132 201
1123962659471997957 205
3056549312838577156 210
1025661670765243906 214
5397331336358247944 216
7810366437124875782 224
1195318972358038531 230
7079722807026103811 233
2524512050942986248 236
1208593608912656389 244
458260789232344578 249
13194777122325112327 251
5922704468287492 258
11746235869336195079 262
8611574268876189188 269
7889840228953421829 273
16998721522558936068 278
6703563424903621638 282
8885848295085850114 288
13776273837475230211 290
6036043703810622467 293
2006225773287659526 296
14202467530861800964 302
7157057020317447684 306
16885485872491802629 310
12800303798361952772 315
621325108927868418 319
16727475898656483841 321
6890112792805515778 322
2421332377941126151 324
16243404411124196356 331
179400401794890244 335
2630159406474274819 339
1306609735592145925 342
14908020842914311174 347
1684452927247835651 353
9400495923215416322 356
8041860727239247878 358
5619270496913133574 364
2985476283152588291 370
18150632792370312198 373
13075355875451793410 379
7596576612263365635 381
7174955249282660868 384
2272878747426984963 388
9645618748109430277 391
5995177571885476868 396
16871713338758691845 400
11801224416933808644 405
15551192014010130949 409
8196030292452405250 414
4794784530053649411 416
68047322062825475 419
10163451915097363972 422
4366630365820669955 426
9174613115382159879 429
17673253091692480002 436
10710744348807818249 438
6301209632168211460 447
6557199531177304066 451
10370980735304160259 453
2426040420413965827 456
18123352379522220547 459
15891150425892429319 462
16507447417454265351 469
487708338428237827 476
14107089365716616196 479
747857609528251395 483
17357876987202521607 486
321005419951863300 493
703083947315053061 497
0 502
17149635587492691460 502
8277651075246678020 506
1819886593879462403 510
13106328552418381315 513
17519686381941948418 516
10696099526822671877 518
4627984173327437314 523
2628632462897246722 525
3686397216490033667 527
6617920799692924934 530
6679301623707790339 536
2596030458845084674 539
13288938917088308226 541
8348492885671808517 543
6252009608718840325 548
5807005916268695559 553
15382799971167504899 560
14954638692016032262 563
8963684459383523331 569
2934745887866391556 572
8236887590303639044 576
2016330563068923911 580
12976290063611676164 587
9986513189506445831 591
780378482699725318 598
383862355994530823 604
7511344867307093508 611
1435616864863593988 615
12590979271693393411 619
859813995721111047 622
17910873098448224770 629
16703366890805911553 631
6922480979814889987 632
8200210214462711297 635
18382541080931060232 636
12959023536126992897 644
11055794376142651906 645
8668012051305565187 647
6795201209679524868 650
3864186432644490244 654
4574634299775772674 658
2086703290536303619 660
7145543127561014787 663
9889572542971630085 666
3510566585561691650 671
10482036181312531460 673
4296479271603189251 677
17165580381790665732 680
17931697598514948104 684
5072138329769649158 692
17857316349005986308 698
1196313437880152072 702
16094827446472526340 710
6365083142954013701 714
17639674970007880709 719
1336948026798963208 724
15719079816546418177 732
453771991153695748 733
15666021623592344581 737
3887496731301423107 742
16351565489992748547 745
12913808626051103749 748
9427161342471792643 753
14610089064185748483 756
11909740995340709890 759
3386059367942955011 761
7100313088634791944 764
14954362273735097348 772
5300343188950335490 776
3306636399811602435 778
15049176780536452612 781
11478464585367391747 785
4192691696663825924 788
1724981527538165256 792
8923121468991320579 800
10407927314751914499 803
4140577061391662082 806
11024499228689010181 808
11103397578962422789 813
16103730809841527300 818
2161511371026989571 822
16905537098408481288 825
14418359835235787780 833
8643099440826274820 837
15803230958149170691 841
2270949347024239618 844
16607521085023703556 846
12520505897845165062 850
10502193626894192132 856
12350321094518214659 860
4950437143309872131 863
938542234576037889 866
9547302901107668484 867
7827404372121768966 871
17757593377946824198 877
13699186867246955524 883
9859653826627356163 887
16394835100035514883 890
13800374264730731525 893
16954635983094506500 898
8015308433863798275 902
858715644299290630 905
4519655150699331077 911
7134867591233050115 916
6432786657037144579 919
0 922
9408341322832972291 922
13653279902433200130 925
1249019122170091524 927
5444522055126761479 931
18233734556082323457 938
1838285473517654531 939
10799019207790220804 942
2448710159565130755 946
18425837006146807297 949
1384258267102048263 950
6553795393861204486 957
5022631533298058243 963
2595435540421003780 966
18298501952506793480 970
17380720526409169413 978
10291550905275666437 983
8968303908578660869 988
7762552109517888009 993
12993351549860134403 1002
13098482377540869636 1005
17174134275815044100 1009
2405939573849534980 1013
11051603729345690626 1017
2765842466801084934 1019
13348255112383532037 1025
4560899789258637829 1030
17071422935680193539 1035
11513452937230732294 1038
1637355496640499203 1044
14940739688966611972 1047
8286559267538602502 1051
6029036263825492484 1057
6337648087046756355 1061
12327119652833755139 1064
7489768843341343236 1067
17101806024406781955 1071
1494687508867621385 1074
915975103999953922 1083
14731060910946571783 1085
7993361195780195330 1092
13688799604315935236 1094
7328858946338903047 1098
2913637027195678723 1105
18189363439163655681 1108
11261484070936291332 1109
1244962005334571010 1113
12618388435910808066 1115
655187203027088898 1117
1699259352638115337 1119
9837815037477742085 1128
10558465000768489987 1133
3128326958710492164 1136
16210393874387209731 1140
3831602806328386054 1143
1858477608543888899 1149
11203849268139405826 1152
14876215834473532933 1154
838167957834962945 1159
4472540425609859076 1160
11410947109444917250 1164
8435818218907397633 1166
11045000766266457089 1167
12325335880954441220 1168
16708265953266297345 1172
18342265362969646594 1173
6953158344648897539 1175
9922701673105435137 1178
10113283973443524101 1179
11668798096262926343 1184
2129351334726026241 1191
5692959118811792390 1192
2917574127780044290 1198
0 1200
14420924818562740228 1200
6098057863303978497 1204
1252966646111680002 1205
7111078464697947144 1207
14144456899593720327 1215
7367692118573781509 1222
9319588592876439043 1227
5212294342286609410 1230
1600499660866511361 1232
17579747388547180552 1233
8365608306992954885 1241
10307394306592963076 1246
17092600292669807621 1250
17030981925892977667 1255
6929843536411176451 1258
9908722951841282057 1261
14685407131320535554 1270
12861962652898171396 1272
11958437143660911107 1276
15904867421058229764 1279
7283769647955500035 1283
7872121678898447876 1286
11726527760261815816 1290
2316085662456682505 1298
12840093831481137155 1307
15574983692566917639 1310
15176154862895929860 1317
16186650646772958214 1321
1965140296142659588 1327
17362020270091437575 1331
26356620300320263 1338
4688323194808506371 1345
470137109846916612 1348
785647648524588041 1352
686083037273571331 1361
8705676087000994307 1364
15985311040931325446 1367
8848102120172622345 1373
14900059783221505542 1382
11611185676221023751 1388
5823293000835959809 1395
11173877492782561286 1396
5985141512875075076 1402
16607272189142469634 1406
7000924871247012354 1408
12796508861938638339 1410
16352304696891085315 1413
12654027566339262469 1416
17652126895193709571 1421
2059554016646703617 1424
8824828815238545922 1425
8026041213654553606 1427
189105210507091461 1433
8038465995762949635 1438
0 1441
4346653818095449092 1441
13441396742193060358 1445
5067771148519478785 1451
210369551309682178 1452
7856429334361659909 1454
6456628847560069634 1459
4777640967745320451 1461
8983636279512822276 1464
14568805960710332932 1468
13817574021643753989 1472
14625711259902278149 1477
4632056779689710085 1482
17613320542667293189 1487
3172012402848437254 1492
8040798394603101188 1498
14064841209998140419 1502
1914908168343121410 1505
7368139610144548354 1507
12868473585497306119 1509
0 1516
1618708134596732930 1516
12587973098332420105 1518
4964388169698209795 1527
11644359715676310021 1530
2644060095775605251 1535
6430078223195648003 1538
10183198452214045187 1541
1240799682393062914 1544
594310634075621378 1546
2369514519273954820 1548
10180653661786314245 1552
954303650251543043 1557
14430712698160791045 1560
7362398115224322564 1565
17170839233019868678 1569
4334478792852912645 1575
6976600872204725253 1580
2757627166710815234 1585
11581525848542896643 1587
1902097979216049156 1590
7092174838851165700 1594
3776232881097953287 1598
4956341896516184071 1605
16560365104979398147 1612
9985649880040289799 1615
8870322153106933763 1622
6905121755133908995 1625
13368640352340902916 1628
6681848478588709895 1632
1825204937600832520 1639
10492979809894170628 1647
16021790814379410438 1651
2537982728896871938 1657
17110141827238231043 1659
8972517116882764291 1662
6878463938568223238 1665
3653948979877717506 1671
11414481194651397126 1673
14522267179648162819 1679
3098339502618796035 1682
7079749050994126342 1685
13571764215085394946 1691
4748948606525397506 1693
1577643399485818884 1695
4080235243237779462 1699
10874175738252140040 1705
8407257242091918850 1713
13208300770644489219 1715
692428139842995202 1718
1811883090719733762 1720
9059362818280152070 1722
1942856588307002885 1728
8118332366482353665 1733
4958069245857057284 1734
14647311378680886789 1738
10762024033896625670 1743
28898254948429830 1749
9834906317233815042 1755
14985989359682912259 1757
1282980713864208388 1760
6063131598875265027 1764
11171681444901584901 1767
9942643440891227650 1772
7536761905759707139 1774
17586310513048226310 1777
5368266791748388869 1783
14231943828217691651 1788
12518647321260815877 1791
129394441281844743 1796
2483490487411335170 1803
654244401428041732 1805
15646533714849457160 1809
11807354932867949571 1817
15902831808268765699 1820
16275101253541722114 1823
7489443708629377026 1825
15395914353243975682 1827
5617555619731661829 1829
3134100206450675206 1834
11607495136261988868 1840
4974806308616426501 1844
17446584074836170241 1849
15686830167444742663 1850
9706307518401206273 1857
1668062460313515521 1858
1175330870409010693 1859
6316020408117881860 1864
3926008952689808899 1868
7412001888157663237 1871
16350342416828571139 1876
17722048717800707588 1879
6638262866276511751 1883
7428951476729761793 1890
17816197047883941382 1891
1346568064340942337 1897
3701787015222295555 1898
6659812133237486083 1901
1828541539854978054 1904
12379063259192634885 1910
2611769333840765443 1915
9618163593004828678 1918
10135224491789939206 1924
12979651712861326853 1930
8882180359699969027 1935
8839565787481092102 1938
13328456084920556038 1944
14232512278042323458 1950
1868952656876792325 1952
7567044498348088836 1957
9878469525845452294 1961
10877666723773861891 1967
4437849393189355524 1970
542122243470857732 1974
4059190346138068994 1978
14321675947144358916 1980
14971180244834539009 1984
7944574903635664900 1985
6982417546170903047 1989
9205813465909939715 1996
14237044737088801799 1999
636814072910696963 2006
12520841226045264391 2009
8898943418672995331 2016
15646690259358356484 2019
15618851112604340228 2023
10285088843216830977 2027
18286036510192394760 2028
6450286360774949890 2036
12025307250191760899 2038
7044602746592181249 2041
8270361223031661060 2042
7199149542695273990 2046
16798091800673956358 2052
5285433079037354499 2058
8498140496880657410 2061
18434636390635965953 2063
8780418579830073348 2064
959965579978681347 2068
2666650386212475906 2071
4093783342266269185 2073
7977153448080645638 2074
3230317076849645570 2080
2644129221999468547 2082
7597431151331275265 2085
6151418962808616963 2086
16786361788616914434 2089
9522044737514147334 2091
15360350686533802498 2097
4398995179394704386 2099
4163122903470647302 2101
18110267126768664070 2107
17811600627481865731 2113
11988559903619469315 2116
5893679902922151940 2119
3302430115655037445 2123
2756050317441962502 2128
7373324598575981572 2134
15626353672087051269 2138
9026268416534243843 2143
5857105831257628164 2146
11246462751297413124 2150
7459631049065515526 2154
2175352842263141379 2160
9748465532031254533 2163
12060676108130005507 2168
8160425232164846593 2171
1665947540125783558 2172
10758171140537368580 2178
5744770555727548418 2182
15867521551313803780 2184
11178209498970826244 2188
2663862265833334277 2192
646145646253570050 2197
6886825228888300036 2199
5219187155516171272 2203
16142200027647465989 2211
8727938199665870852 2216
1200328579526163971 2220
12449385538114001417 2223
14632283715533800450 2232
5295800027246062086 2234
8827019094633400323 2240
14543826221768176641 2243
12388128316821831686 2244
3087048392675298821 2250
17669786912563615747 2255
3879520399747123716 2258
15648071975541157893 2262
5580473107362200071 2267
6895786389712974853 2274
17709709086906012676 2279
9627483233657542665 2283
9602326803985618949 2292
6748599026443758086 2297
11488364339401397254 2303
6716511183525677573 2309
16003763240189186563 2314
6003803301075291138 2317
15800367754014516746 2319
2817341800198731782 2329
2110085916033252869 2335
10353852055773781511 2340
8745468498457416193 2347
15197463976907486213 2348
11844773108515011075 2353
10745169896165544965 2356
9502565595236673539 2361
18340734722524717062 2364
0 2370
4877506240735029250 2370
6632868101528461318 2372
1094192348264738308 2378
15930308455756352518 2382
7517061312773919237 2388
11537382714050522116 2393
15343851421525887493 2397
15685583084244037124 2402
11443729733346354693 2406
18096845502703148037 2411
13060060807344890377 2416
8226818503915081731 2425
5171144332412330499 2428
5367144440061049859 2431
4687503341676126209 2434
8115677569098133507 2435
8753274682505368066 2438
6767268893840927749 2440
10747160183142327300 2445
5318831768157948930 2449
16744837601970291208 2451
3968740997769839108 2459
1041860322726726147 2463
13185494599343868419 2466
3781663100474830852 2469
8664347289501861378 2473
7145447006642560001 2475
977858689003972101 2476
188865761021926916 2481
14781205616979726850 2485
7514076159997088261 2487
15227633270557658627 2492
7486357174119883778 2495
7899052859637422087 2497
4312982947448530435 2504
2484418012864310785 2507
8450324929602980870 2508
11374778755239228418 2514
10780034123560756745 2516
10313953391808102916 2525
13836623279669341188 2529
16297706918062760459 2533
6404560275247226885 2544
8323769790774729734 2549
10061687257419431941 2555
6724033317759518212 2560
12265972209834273288 2564
4748706107567735299 2572
17588235414846031363 2575
16029681841978911746 2578
333014962274056196 2580
2819861156000228870 2584
17301319418358929926 2590
14323022738651812355 2596
17758251407482208260 2599
9992216596142364674 2603
5541911712511293955 2605
1880849355295036931 2608
15421034026101803523 2611
2288503501826235907 2614
2336333131728265731 2617
15127408664422292997 2620
6756061181968708102 2625
2316367058427453443 2631
13786932856453332482 2634
17564157627292750852 2636
5809790665868502019 2640
9389430036410766853 2643
15157257604368261123 2648
523412383725034497 2651
5270886391729814021 2652
8987256414287503365 2657
2751897370690544643 2662
47819066577966599 2665
9543124453318907909 2672
15186331456703232514 2677
9731347057535958023 2679
6234700495105510914 2686
17720066604242729989 2688
611878128332703234 2693
6029104170087404549 2695
14612606995632327172 2700
7357792311987945475 2704
6074856230289873410 2707
13368808999886628358 2709
5918378978107988995 2715
15624776793824203778 2718
4241055509726121476 2720
12687432015779367427 2724
4003272975122620932 2727
17483676776191982087 2731
2701605488646040584 2738
7387630099939362308 2746
16331822462747681798 2750
2197183442359868933 2756
17624623361194542087 2761
1749450990014992388 2768
2888206094896619010 2772
12985412669390948353 2774
9843120678422464515 2775
15590458610270713859 2778
5950622975418741251 2781
17607672802725530117 2784
1225097419526011394 2789
3758572251524375044 2791
5891371767718009858 2795
6843754938996156419 2797
13418347525088883204 2800
2887280155684756490 2804
7867196614872225796 2814
10992396837241625094 2818
15526482250456426497 2824
7582254907030848515 2825
14309589056601523716 2828
2843794758628944386 2832
10106627892829635078 2834
11117505412117820418 2840
17559521087909430786 2842
18410508844162253834 2844
7796754440171003912 2854
1826091018065355268 2862
5568124937607335426 2866
9164033835486570503 2868
7917102923116225537 2875
10708221634884163076 2876
966446973350329348 2880
1882776320247897092 2884
18137433528115911172 2888
7577505208556149252 2892
3902521102041700356 2896
11942362790107158020 2900
2328713611561709573 2904
8376513561567004165 2909
18415012889800110091 2914
7983446382889179652 2925
2304166271864391689 2929
708759182721729026 2938
10774631175750681603 2940
2608247964063907842 2943
7317603117343176707 2945
12615180422705001477 2948
17995452459822326275 2953
12439250137675515394 2956
9947610136498965509 2958
10340600516380348420 2963
10073894039732477444 2967
15954561361998232578 2971
6039226287079734788 2973
12684813664097613833 2977
8337524429261820932 2986
0 2990
5738139389410570757 2990
0 2995
163262518049440773 2995
11390362112332120070 3000
7666496378417453571 3006
17188351170280199170 3009
14157925477049500677 3011
16535316221715341826 3016
701193705161007105 3018
15417977144980853763 3019
9623949443365348357 3022
16537640731048440324 3027
9880057250380779521 3031
10507448958568448514 3032
9901540867816521219 3034
10882434502571251716 3037
15939490563935542790 3041
3818155241101528578 3047
10810785028031231493 3049
17268925026504538113 3054
6000103580025957894 3055
14492044616225970179 3061
8964295197943843335 3064
13244227239481936387 3071
2072267724499101186 3074
735562179013069826 3076
3271477415853879302 3078
1150251700717751812 3084
11835839830005115393 3088
17028480913889055238 3089
16864969398419772420 3095
9646252156141336066 3099
5589333819644110342 3101
14729039479109188098 3107
2256025994407046148 3109
5630416426912279555 3113
23611161351524356 3116
16061932977440933889 3120
7560058124185071106 3121
8943767870065516551 3123
17388385529962317834 3130
11686727589179028995 3140
2993671307613155843 3143
7451626547139373061 3146
12726375988952098305 3151
0 3152
1735273330892205060 3152
2746028049042776065 3156
17093562035495421445 3157
7598703106262353411 3162
17526920923827930631 3165
0 3172
18087597149122765317 3172
11336730259137625602 3177
9704022087244797957 3179
14531181144788964866 3184
5103530438547424773 3186
7049971328222257156 3191
2593832991454060548 3195
2549992206172832771 3199
2656864556911864322 3202
3094347590740453380 3204
0 3208
10556974365044028932 3208
12597146506913681926 3212
18243354473097630721 3218
4168646291002030084 3219
8893226051755120644 3223
7904367695210051587 3227
17247367703075879942 3230
1338287165638264836 3236
6734394253777139715 3240
14645087877274778627 3243
1841749727013933062 3246
0 3252
9793622484838288388 3252
15384076833580083718 3256
14678310837729104389 3262
8947895455599830021 3267
12421729442783160325 3272
14382812703434878978 3277
3484468606955360259 3279
2411175954345499653 3282
18322361710054416389 3287
8989744845956541448 3292
9637438279185886726 3300
8282725403817063939 3306
10727259769060221446 3309
280860399088910340 3315
3074647116268871172 3319
9311932047626983431 3323
2990333995786696707 3330
11415454184475025922 3333
8194042667332418565 3335
11269986522125913093 3340
10773634478079810565 3345
0 3350
4302235270674672643 3350
4579270605621971460 3353
3687011949425630213 3357
9678333478858482691 3362
14661606109051090440 3365
9504123850532876291 3373
14299233528797568008 3376
10370491504729965060 3384
286239823911254530 3388
7969121812144744451 3390
16606218867148559880 3393
11756345184017143302 3401
8204961944753809412 3407
12456910480062157316 3411
7569786299014196739 3415
3372309516929818119 3418
16631131943564946948 3425
4436969913528429575 3429
14467771002258720772 3436
15278270405312088583 3440
6638334178561090565 3447
8154814430089498114 3452
17289464348431017987 3454
13185969354886446085 3457
4725380864147687429 3462
14933071000620043778 3467
12471883028204926466 3469
13286302152236950530 3471
12020003522260348419 3473
11784545509165047810 3476
10311182359550097412 3478
2262872037167824902 3482
15672162207595698690 3488
8479660175647360516 3490
543122224331105283 3494
8738610060644560897 3497
15969479020845567490 3498
3500
5303047073946667464
210658854139
493093586
15289397349632312454
5941764183477191834
3477193953305167424
236453760381
7470284155521404014
24445261
16426766960960540026
14549236
817365937
1873618471841499416
71893492
10694515171064744788
29330183088506125
61997475
4653200
109445719
8926052536804313893
7528330190111771360
1418462186
5887104182899575287
2625321597997091447
23407864425745813
1647838213
6152225753094686522
14151987057237756511
18058417591402760409
538510099
17855463731522440261
240752528220
27920040887059601
11078361536363433136
12517601
15885957841278600403
518718202
805438326
2621553
1550910461
2411070513
59965836
13012951802392676509
97518103
2625321602295859611
30277976
546374457
16759426304739641933
259654328
27356063970624739
1873618458944931675
6209987959894902621
5728764444739437994
18413109988782047308
13885455448020813663
13464164481390611573
5514354709969504081
6364097374632348674
2676033351739376985
1136798196293306910
5299098874403555921
2120987217453057458
17306856587979066781
1873618532028844481
5572365145471912335
18412263926676652075
105382480
5303047039553965447
9881712940254169714
152830562
8610102806501591788
15524263781940136850
14282671233461718187
2857298572705729021
29330122900898936
10554335258691243263
8453377129057749572
18411417864571256842
811271050
1873618489038604579
4657106642463886071
2676033356038145381
514654951
10757572347027851837
4237766514325588729
571999061
9821766011288487605
7230168968130792223
2704904949959166469
1823671323
103350839
46006654
2755882956846859930
15289397371128186695
12662636664722033563
16318735
18411417894664929297
5462796894122411284
9950019064427710530
6981729909914862956
1992588707391932346
63766972
6422699
23407808536904833
15394822466617412826
16881139139804531782
14312300901618944289
2625321593698061230
9870724570679212
5780604289886653255
3870997034531752803
2531021389865944442
10908568553618343357
1860700038481053299
196215461
1801847830
24183115
18424247431471827427
14287090
417019855960
71631344
4391052
61735328
18413674012989259870
2625321597996829544
17957750408840481687
9870724568648556
41943405
2789363542978135882
18412827950883864637
548143940
22151483
17257283845880874759
899112529018292807
538247952
69599701
8510664359869943178
27356081165698156
27638084672359236
12255453
11400819049620310987
1321272283
16881139122607162703
2359405
3101815889301670444
518456056
9232147856523987724
3758799212073651272
3591160524196219107
154600049
17946608694533885076
11500631658516907905
825323275339564903
9870724566615620
39911783
12318365723907459763
546112310
18412827980977537092
536216330
2676033351739114988
11069796553860646809
7880043043777809442
451412296787
18411981918872141859
11678577273375754735
8856014234050823647
105120332
1309344723
162464400
681145240220010584
2626514825137096412
6589396841525218018
356832249381
6156738032733324876
11202456151687629452
27638041680086900
11243723090649876783
5726358144768542273
12498251711624252784
13702827714901707594
811008904
8192198
8714520725396523830
514392806
9960543895307946415
15287141235608259625
5727354401416546168
1808894516123993997
3686437022462641529
5249797181178709209
2625321589399030850
103088691
3062219857732765097
830399540494469985
530117487457144076
12454108019635062383
197984938
8930986418384079868
818873277
16056587
11526999220155450649
6160551
63504826
7621890105505615217
11847668763332905754
10377426660276898779
1873618519132015281
18092519415945890646
15882855708139391266
7993599274919922706
2789363538679106064
2150364451440035988
9870724570416301
2625321593697799226
91161094
1410073577
23920969
7513578521803359945
22279798815198594
15520597512816297356
1023125932615797552
540017436
8910392170935354895
195953314
644809585
14024943
71369196
1873618476141774348
816841645
10906583479868327250
1454041666728626384
4128904
18413392005184749654
108921430
468609401971
16204201012116260706
99025451
9870724568385196
18412545943079354421
11878630053446878902
18204249488608200784
5566476545725367766
17951898368652543383
7558005371879033601
16542141154387102177
6316393479032998553
11694336983993944146
11427331956784106382
4662073785906890031
1873618454645640429
537985804
12999620585941961275
2295119206548507606
11993306
1597536180772867045
5299098844309358384
8294669686619703163
69337553
1873618506235448739
518193910
5406444726343502428
16765215479188031591
5460499803636172954
3431717683755289915
28202117477106938
5249797172580910311
5745384143842643344
14065038233622153931
14311172801615955497
16758489844492275047
5510538272098551989
11065487220741573048
9870724566353399
5679882735784101879
259130038
87097857
3491703471172619422
545850164
18271599167641487963
5991347923196709309
1873618458944406678
7033448275620070919
812778389
434977997061097911
3445982126355516078
2676033351738852867
3545799512027105927
1873618484739311861
12749251354825264418
14836382508930370955
2625321585100000596
21997756618246082
8716776809328151764
15580874176502892132
3332575624131774585
4445946672738010859
5780604328577598853
2848264744227112681
1873618441749072804
257098416
4930631980557601532
6877319166685482198
1005889956380019628
820642761
17826079
23125779236849772
810746758
7930050
8929320279979198383
9654763076979264499
11949535972653271176
1873618514832984063
514130660
18066207382028748450
2573543666009114673
18613585580197092
1427238547443354327
2625321589398768544
102826544
5903884228619468800
4279043148
7036226112429884975
818611132
15794439
3324580943442478547
1903640920853056624
5898403
1873618497637649718
1133620887485417426
10156853965084755435
63242678
282723005
13586095437453200186
9082058141968173941
1987794462939089941
13237708531286474753
5240852582657493474
1915314009235720841
9870724570154139
90898949
17090754651615726815
492307151
195691169
11050161621988804687
23658823
11623400942792738969
9304480456320748248
71107048
816579498
23971751058934778
17869638717220195611
1873618476141513316
361675971417279818
61211034
1873618501936418049
3866756
567411536
5302201063430292982
8486888319115725460
12406930521299355297
9870724568123690
11034422950646711803
4287350254045103750
5566476545725106758
1923875870
547619651
6366353527348595732
8597156797828894009
13590665243542948895
13237708561380147208
4254959725487523541
2907303882175415846
1873618454645376983
9230753948926543533
11731158
527827717
5511666307614640107
1330643932
69075405
28202091681942395
4727296740454696303
1992881785902860007
18301216972081072101
4076606659425995504
9870724566091296
39387493
154075756
5459976644113468289
545588016
12461042340477994821
223556406340
32432337723721245
19595563
2573543610120276856
24535874149025753
5196265237615086368
17735566651085687884
6204347601746593065
1873618484739049815
812516243
6152225714402428442
15291935501556190620
15505670362359531298
451411772583
9484411285755463284
161940107
15292499508566297469
563348302
506004186
11238431078799509026
18323667541285735009
2625321610894640833
103179363763488430
503001580666
12769025487284210679
17785259844527786731
29612147900877606
15290243377345399572
17563932
7667902
3186488476490139978
810484612
1192315333980326167
1873618514832721746
15292499491370961900
513868514
5347351719937377689
45220217
11775490430040476325
12240192446106372977
35324256
2396555433535145871
7409502855497715015
7888341864134085054
4278781002
1732546121802517809
2374936041605498895
21433680820701635
12189960762281954023
869984510486186619
3598203394278688718
6103488079777762245
72876542
16990917635978692369
818348984
15532291
1146796961722731823
17761874897365304540
62980530
4534407021717882867
5636255
32714379920409891
12552846396214610071
6262673798361580735
2528483177756102046
9870724569894177
9297735470756268616
5831598115918776853
32432303331018178
6064762127302393958
6156455943246842659
23396678
13500652
16916327697533962956
70844900
816317351
18411699885273055253
5884848047378859255
5837238405281154301
14311736903207619026
5141736951422061236
3604608
31022281504523376
3599049409094225259
577045344
2974323816123992770
8021450341214588326
3577503648415550265
509805280
9870724567861628
11098517635487303139
7462549834646555859
98501157
5779476207078475458
219257375260
490013379
4222974949961697922
6366353553143235674
3158171969379764633
21365044
27638058876667848
29330140097217635
1873618454645114642
2703776923039566000
68813257
279448782049
814285726
12237654319976351671
517669620
5779476284463187670
10375505326587315831
18411699915366727708
6205475624366966000
3307734082
39125348
1087507565178193378
545325868
15986098390340470919
223556143025
19177592590632702
8865366478519731984
19333416
32432337723461001
812254097
11305519054433421356
1873618484738787248
5105416417023100899
572982104
505742040
563086155
104333894
8070528080642443989
11327137566841769230
2625321610894378836
16377260960560187819
15586729198848181726
1873618441748546884
18413109971585663048
4825924017323379312
5915592292141435844
5832726151436896491
17247780946628644032
810222466
7405754
11549275701007551889
10161648502327149991
570950482
1873618514832459339
313841222762
4452458274095237609
1445774942907271091
6101795934071424788
92406286
5293539447540681024
18331491793766525
197198505
11199980773228349986
32432320526091507
818086838
1997667722089860216
2524806027085153844
1964966944
15270143
1370042529145686776
5565348523104797810
18331539082773742
62718382
2012415014
18413110001679335503
5374107
14282027259104724924
10375505339483621145
9887461037680036022
1873618544926132491
4662355883991631380
18412263939573940270
157614716
3295137431799204142
9870724569630759
491782859
214958343888
16875205763331852041
7241607903360452069
5408471212899110030
23134531
18411417877468545037
27356081166681957
644023149
70582752
816055205
3342460
5246976952665638015
14212253575230457510
576783198
1842511416005692464
806159226
5566476498435574920
15292217517958891614
13516735047310051359
5728764487730398405
468608617008
4025969582498383295
16044698410490725659
1519546451849645365
9870724567599405
5566476545724581156
5619444426388998007
98239009
547095362
27356033875641745
219257112483
8140646021471143544
4713167439824750602
16357059045845960667
5462796881224795644
9138963602338286574
21102898
10905173367761798655
13701595356116683915
2477484405147109478
1880166538706292058
11206864
1283692271244348427
68551110
5885543833259674054
18413673995792875610
2352415791
14947075702982868
5299098870103476096
681145240220994278
163447447
331038328206
38863202
96207382
153551462
2625321606595348609
5461104757014004985
10744889200825601240
1988559907
258343605
6517011693716180143
535167753
2530175340657839273
811991951
15291935475760762248
4397798264919820154
18413674025886548065
12109395139072755174
475082778886408323
104071746
161415815
8697110475982376165
15584540329550678645
13669583335851559254
2625321610894116800
1873618441748286746
18412827963781152832
819856323
6209141854797957852
1783548230307677653
18411981901675757599
637928298
7143606
15855332315905657597
2625321864544389907
12020808312486431384
3076135121411313050
10139438201185111279
6152225744495577231
33560368941368890
210659313158
4278256712
27638024483702949
24904017
32432320525830439
13263754581809432790
817824692
15007995
359800716494834349
18613516794268696
9839328478246341893
62456234
5111959
18411981931769430054
16219982623696489082
6261827792145090364
7692717626264324682
42664306
13806855580317125108
9870724569368358
16269555352897260337
214958081659
11214563466575480865
15636771529559117046
13271165719268362246
2652485274356286816
538968856
3784724792312663401
18263821886743185772
1986666427421953426
5565348480114297669
5352348827359053328
12976359
1873618476140725820
421319345246
70320604
11703165067112811597
21715697223994697
3757107087862401328
60424594
3080312
10697899350700788395
1873618527730534170
468608354196
509280991
50528646
1193603335023233930
16635669954819197974
15426482629288462533
5460499803637156023
2625321602296318353
9870724567336570
97976862
8818864638845060491
14288223544298637564
88080898
6996745855548787140
5566476571519223063
546833214
220421203678071202
31022238513759415
1873618458945389823
6406389097441592980
20840752
813761433
27356085465188671
68288962
5865888353649363875
109394696450803010
12213481117926952067
18413391987988365394
10944716
517145329
5723537903358642458
21715753112570631
7758478083289188556
10675690836223986039
153289315
95945236
11547019543992076059
9649086479758069023
2625321606595086582
258081459
544801575
5887799994573980828
2845029447323880298
18809125
8510103668314541335
6205475701751155414
1990332636357069057
429916882098
2673382969485886910
1873618489039064439
18413392018082037849
10914208898869168291
3773122177597967623
161153669
103809598
14107087915135404740
6366071515245381876
18412545955976642616
15289397371128645360
5462796868327967227
1402930148
28202057290482949
797695489810761887
16777494
18116142943679220675
5142301044413893172
17219576355390295334
5249797112394286460
13735950183222348532
6881458
29048192479791616
16896582888638318388
14517406836956661503
5458848655886518922
313840698753
5197393273133271298
3861350810962691992
6375653898722412075
16885380374869314205
361129707266
210659050964
29048123694646491
3017170418691476659
1873618450347593089
15290243360149277503
14745847
72090103
14546784569801180959
7431889721301470079
6364097387529111599
2435475427475262665
1873618497636600365
6151097734773868363
62194086
17083693200934636558
32150372909516328
4849811
3172873313800750756
2150364429944620611
3862478902367620470
9305858029919208637
2625321597997287853
2508194873
491258567
1408762855
5015996636573993090
2414921941537785811
538706709
5734260728554980678
22610237
12714212
70058456
6208295882974168451
32714336929384395
16643035121679272213
20023641798084435
4770547828131824981
2818164
1930668198955452820
13726068529822894439
468608091255
5569296714050766113
17490170188584258190
8694008299851745161
7073102484926630551
155058804
97714714
40370537
2625321602296056238
1703347206
15895039144349470066
5352348805862656188
3068049059797011246
5880738612678821404
12309852946450942075
33560429128451329
15289397384024950845
4767727591019973374
10682570
10233718743719545342
850088361543927300
2792183694107936667
1107456968073808590
5759560470823897206
162923155
29612216687004362
5875369269012203157
95683088
294416195335096411
22279760122415532
5639662680184522626
17619012653768771484
13237708544183762948
8550520059753138843
27356042474686002
249849483538007723
544539427
13390152586296232130
10906513561824594910
18546980
1873618489038801706
2676033356038342054
6313103561496791450
2063139881
6848542126596623056
160891523
103547450
14101293042239958
6151097653090126690
1584595969
12424382439595706534
17698252132056434004
4129856573689694799
16885259953617962521
12393440069873436875
32432320527338097
21433680821684597
8617826180017097033
1413046597527668667
3973491001936446780
819332033
17305802226190387588
1873618467542665344
16515346
6619310
6206321690771522709
4089771542585346905
1223976962194278208
13487493291780736605
2487491354099451134
8854886172739175692
9870724570875039
2625321593698257851
1535116279
6262673798362565305
91619849
493028049
5352348797264856883
8143564249694210398
6151097683183797493
9386257309953099582
196412070
3865299044899163405
71827955
18613366323088485
18157949162008873831
7562235583526800081
817300400
4618470194090937269
4587663
3932922014897081298
61931938
1873618497636337289
2522831856378710008
6364097413323754682
6053028402293443390
42140016
12287601267178473523
2625321597997025900
538444562
15991329612793777185
15291089478142986477
12452064
2676033644081056812
2556016
16508579235574254010
805372789
59900299
14787093348585572176
2575517759332551933
2412665810316625225
7730749911729375728
6155298010574883251
10488220504998020326
1311572948
883931539946605906
5352348805862394041
2786543383251193103
546308920
3346269252
5782296426993943791
4469799173763958889
6205475671656957491
7872981661881076049
18116424960081923281
2676033351739311464
516621038
1465168459078698840
5677488692584514734
105316943
4562124351240801677
5245848874158263187
16432982289349543214
162661010
3971798877726246151
4787251587800828866
5875369294806846690
12217235256243064050
95420943
5354604868299326678
4502324021619918399
544277281
5940918086979029952
2014710471177341259
2140013610
1873618463243635741
18284834
2676033356038079832
10531295876509927029
5458848625792321791
18411699898170343448
7410231625909407077
3478039985316562895
6204347606046083061
31586254122912349
6829167320236755019
27920101074341046
13165236096819726043
32432389312220424
571933524
5727354401416743090
10225919154718574351
4127600472563058730
160629376
103285302
8483828720842049762
15740334315622960494
206359759935
9813006656186419950
9319686106503382840
5515085278788979157
232154663489
26149204
6208295848581203181
3094190453106412515
6520986101609793850
32432320527074663
5245848925746038203
5942328186188203485
1873618467542403595
16253198
15881445561639371975
6357162
63701435
15515478115209971466
5833854247140395797
283181761
19177532404009207
16567374854657149772
684134257893509654
9870724570613070
15680489859993767209
12826571498698443033
2625321593697995819
10329316755526125416
10754752208794748192
10758418391935812957
12105446909435186010
3143159678306028631
236453432350
540214046
14848239906707278405
29330157293274228
684134210602468610
817038254
4977791693940394179
71565807
1873618497636075077
807142269
61669791
11287403619712895066
4325515
13819298136066198
7734678113259293802
6098975847429179176
99222062
18056758355458722638
9870724568582655
16224960573811657069
2625321597996763849
4078298757842341053
17625510063045740642
10528906628815718922
490734276
5412367062202975465
22085946
12751507524739009261
538182415
12189916
18413109984482951243
2541195915421354200
6671860954713623381
2893509029140760671
69534164
747829823970020707
6770804071406897080
2293868
5566476498434524382
6534429686359852912
18412263922377556010
164430493
9870724566550039
154534512
10167299845199168903
12754891682880490747
5250413516934022944
3315661715940248009
451651625195343029
32432333423379563
5941764217869305943
2141783083
283748271730
10161648493728303880
5240846595623881868
67502526
15618641120352995308
2676033351739049517
6205475697451599682
4023356732265137752
14986955239351847842
31304272112126853
516358893
2207492698791414354
477207135345
1309279186
105054795
17859691850682797212
162398863
4238330517036600601
152502880
18412263952471228465
257295025
10905173350565414454
17498716255300421272
8881019260503721949
18022689
534119176
18411417890365833232
6293435910568086045
9374458755688828226
820839372
6153071780807051278
5909364179964069981
8126661
3735453693364143828
6155045908522469290
745740842898098858
2625321589398965240
12142525752872799042
160367231
17958290734101235336
9523554809025136564
16892239439269464715
15289397371127860096
1736311827
15991050
63439289
6095014
12484855343804124176
9658025172156550406
18067928153034001057
292345808939
16572875051796793000
10542598463376395267
12772641161582545873
18413674008690163805
1544487931
14737352740221028816
282919615
12808641794728789765
2625321593697733840
17128487303121020
1706624008
14101026494875963
11214563466576463780
18412827946584768572
11966722661119888545
6156455943247300775
5300226909920168653
6004915412369541960
816776108
4223816177647290930
71303659
1873618476141710425
12477949191893683608
417019528294
9511403338599564690
4063367
61407645
2543805385922512178
9870724578216632
5407707525201267705
9870724568320021
2564752444
98959914
15494005608834598990
15140097999495498431
21823800
12734096628671909131
537920267
18412827976678441027
11927769
69272016
18411981914573045794
2571498445011814318
10592171188278987146
2057911839619745748
9870724566287831
154272366
545784627
17616192489740896443
21715680027609308
16886908734816455284
583336804
2246313005
516096747
2625321585099935141
620888934
162136717
331037018572
477206873177
503001777494
15592058013925444099
1652810939277510396
10531295803425490030
3205882223899445065
31304323701671300
28484129580057898
1873618441749006513
16893851890367073119
820577224
16904712944498838074
1394017249
17760542
4160689491693538063
4047541379259827663
7864513
14219872676477209184
504169174
17244622751296785814
2625321589398702921
4278977611
7239633818635733091
5462796868326918190
1334641629
73073152
7460569593843485201
15287141188316891641
818545595
9339868219275806468
15728902
5382561551670903978
9373330690077689939
18413392000885653589
5832866
63177141
438515402871
2373415502940997016
2148672322930150296
168849237244054062
12339564610979564477
8327325764367420682
7630443591734791098
12608147700378373379
9870724570088730
2150364451439708714
18412545938780258356
13221120945827219803
492241614
4129856608083381232
15740733274947783803
15858116883009440274
1873618476141446514
816513961
17564225130023161250
13697261
10668197763104573447
71041511
5357143003026951378
31022281504720056
1873618501936351339
3801219
442814170389
5701610621477129021
8520914754064026558
15289397306641222853
108593749
98697768
9870724568058057
5780604294184830225
156041850
5192881006389626514
32150304123324262
219257572663
18412545968873930811
5249797099496672683
11127945220196076778
9103100569952650951
11665621
421318034537
17619012718254098754
14443179094226111164
1873618480440216958
69009868
10594427319499622429
814482337
13968724050119231192
28202091681875145
27638110466671725
16166203682344470241
1712194570
472907842721
507970270
15580874172203795679
23689855033805297
154010219
17092164759424403479
12893049762838873864
6877309693745106245
545522479
5887800020369606783
14977809576148535095
19530026
14105033451515939293
6795216411027442152
2543452128325209336
1385890784
114426460
6444189713816225654
6152225714402364510
524384476410219715
17953567922355439196
17113993018971653874
573178715
515834601
17090754617222956318
161874570
1538130937
47186305
30458188512103543
2449021711964768402
2414448843017751282
5214737420442796133
505938649
2625321610894575340
13965057806789381527
970700105235760464
15223822230290106035
16285378285009240167
16940455997476965252
2601013084734032090
5248157445900799208
1580068669843704469
15043322265989680207
29048166685607288
3863606942184311140
820315079
17045009756596405420
29048192480512516
11510172448171493799
5885976160280708469
7602365
17785259896117529586
8856014216854897981
14477731067643038195
1873618514832657292
2578187325
15292499491370895395
33560368941827284
13146357072728951328
17353152791227993245
159842942
15530553734630409457
5569296726948055802
494159375523777824
1812923415
6366353518750729401
4278715465
17097308613030775025
35258719
1899651063193471062
12103109825679658143
6364338522051512284
2429880031182916564
11621189233770302317
72811005
15466754
3880024017885400135
818283447
62914993
4076606625033226775
1873618497637320883
7746405201714873917
5570718
10859426818132543221
6925759835249836137
3506237898852665380
23407812836853915
1873618523432225060
17166316876055971050
18008952305986046279
43123062
9870724569826462
7410173966093388838
33560399035500221
511599051947
214958540605
13237708557081051143
20587696099952690
15339421027537585423
6104586261132347910
11103300151687644832
1456931819
1873618450346281005
9181531069949872018
14650572868605052119
17783567759008991682
575239712866634722
15288269284022357372
6206321673575138470
644219759
13435115
399811749952817933
145335345147610979
70779363
6366071455058494624
7529998377695250462
519635711
3539071
576979807
9568723490388248888
634323816
13012951802393594980
853643387796785445
98435620
28766107292140894
9181555677596944971
5195701200510977145
5129024196560096606
5831598124518278362
4844858457232050089
219257310372
7569568047215545466
5461104800004441485
1518418407735101149
814220189
11403474
18005251247539029895
10333839787251271664
1836516380
8054758354584013306
507708124
163644058
9001701177466488459
2625321606595545096
153748072
4787251587801811388
39059811
545260331
2036204584
5356296971014964874
19267879
9714916684781063078
3055188874828713383
14576212124415364447
2150364417046743283
4662355849599126556
1372824966366170355
1318388695
15289397293744393060
8423108281783224429
505676503
104268357
477206348880
5831598081526006949
4625631396377398109
2625321610894313322
6206321759557388696
12237654281284815334
17236251
9391897711091583990
3891732840317912522
8856014216854636141
5758903550139959418
7340217
638124907
810156929
6206321690772243584
112132697
15287987228927658628
339636063086
7721139320100816372
684134305183500639
22279768720672168
5831598111619679502
14814059355306855043
4211213383
15290243360149735302
18411699880973959188
15204606
11507341268100646834
62652845
6365225483234117329
5308570
3491703531359374171
17791918762976347730
4127600455366674792
11130039777759856047
13951205954302381098
18115578910873816258
8659114857360722535
6153353844499089111
157549179
9870724569564298
16327183209838150989
491717322
214958278120
32432303330691092
17684252729367202593
16965951797418331227
23068994
2272905061487347697
1873618450346019367
7515799761807542411
815989668
2576363817137867614
70517215
17763448248357489818
13172970
3276923
806093689
17621268802185464283
60621205
18411699911067631643
576717661
1685722535145180234
23689824939607125
17256155806064642777
5516892801706297876
12982659022915898414
9870724567533791
15515140725455259155
547029825
219257046468
4180850416920431050
21037361
68485573
11141327
813958043
189614828176542708
1873618480439692390
279448454880
16253215886083360174
572110149897422243
9896616181508082455
153485925
8021450371307931626
38797665
19177566795402134
27356016680241600
669582195
2625321606595283106
554894151
5512098557251945790
9568883447315500158
1440671446449589035
4502324021620638916
3249068390006196153
15292781563660995825
821822415
27356063969248337
18413109967286566983
10911952793442192048
6064503826171693679
11161692095903435283
1004761907965660269
2207210695286917386
6388664954993575829
46662016
5885976061401368013
104006209
5572809636517250553
2625321610894051277
17955470565775510239
4661227814082512385
6368045642960996241
5463642874544129714
16974104
533070599
809894783
18413109997380239438
7078069
637862761
6288511205539515238
3974700764184054454
18613559784442970
2791055594105669609
4504298205224635444
18412263935274844205
2605266760616185153
15287987228927396675
339635799228
92078603
8501910827968825512
5991347884504386492
210659247559
17284241873202253123
16893851873170950707
651404368114879038
18411417873169448972
24838480
5726226344404977639
10259573046193883986
2676958769323838072
72286714
6886936648282539655
14942458
521143041
5046422
13980703149896829784
1495991284
62390697
18199185222634702635
8834282535679560676
15925946803693423456
42598769
9870724569302153
5459976661309982295
11084138473134491150
5303047078245827995
214958016090
12451287838412704489
5509410202188647833
2681814701524780811
10628953736434486617
9774054990929462949
18411417903263121427
3865299049198390675
12910822
5356297009705911966
2421359666
70255067
2248112069177510680
3493395634074945822
60359057
12654580528992553525
519111421
3808100888100343209
3014775
13513632858283052077
15289397310941235057
8861613698626554738
9697577994188492052
155255415
10381427610856195682
9870724567271440
2625321602296252770
14512708438227029368
97911325
489423554
4022831255438034250
30671195
1873618458945324208
20775215
5459976691403654584
813695896
12665415616966166285
5645056620059298667
68223425
1319896024
2390363305266056430
17634738504986593825
20305632407192782
17462509665872383079
1606616067
305243098454
163119765
48431492
10590197086357423689
2787671431665157349
6366353484357502971
18413674021587452000
17620986833073014515
105775699
20869665212206112
4445946672738929841
95879699
2625321606595021110
10906583445476542150
18412827959482056767
17205553309096938840
12294570438877711433
5461104782808583112
544736038
9950019055828534995
5991347927496394467
811664269
5403008449516603011
18411981897376661534
572392279
7677136701370927115
6155045908523191668
18067928196024961188
20587511236070012
103744061
161088132
335336768790
6155045934318095559
13322381941750499717
15291371425760087333
30740222110467489
5245848925746498573
5349308051975768286
4548309565419816229
255984301
5461104787107351969
16711957
10906583475570214623
6365225453139920066
6177363118375897150
6815921
7032232753418799293
5558136817694803400
4030203865610717075
12718336251608304605
18411981927470333989
1545208828
15287141235606883137
5837238474067478018
11705421198335413148
5524868651610213131
210658985303
6098975770044925746
24576334
13151687854617134836
4662073803102881076
72024566
817497011
29330157293733695
17096567568145714575
1454859013759438228
14680310
4784274
62128549
1493907215600323645
6364097387529046615
12583654612056476062
12851509922494416016
1495729137
15287141218411547437
828143439367899804
2523959969279970191
3919394969679695174
7595953279435999504
2625321597997222413
491193030
1839046019115124804
7241043922144659849
18613499598604650
18413391983689269329
10594427319500605883
12648675
4861149623842704773
5782296448490276391
5516046782590617836
518849275
10015828607276288922
15662612681012938353
2752627
60096910
5133829485924779401
7003516464553396964
12903069678853164419
2625321602295990612
97649177
259785401
5464488953846367762
546505531
30409049
374027977988
1396769762
21715680028329254
5637072609524124450
7731877951544692100
1873618458945062288
6767393152337644543
9467310877347154547
5429433323061448040
10617033
1730937871
107356700000258304
425617786716
451412690018
18413392013782941784
12020684574736647824
105513554
3541851256594893702
16038494049631274933
497025749
4661227783988316231
18412545951677546551
5565348467217401524
14428481252717692252
544473890
3344434243
2169005683868174908
5993603989931887912
12972952285742288
13117263636444153530
811402123
2676033356038276482
1873618514833639109
514786024
572130134
160825986
1938490399
10280579133800254203
285938493736356261
6425213859614951480
103481913
11364576519499679975
1881294612915292853
15739206202722094240
4397798316509039896
17011915733784398286
1873618446048496233
14383326641327005
26345813
6156455960443095577
14975681650483333306
819266496
16449809
15288269301218674108
1873618493337504776
5782296461386581535
12162857194684744950
16633695839999756254
6553773
6206321690771457172
5411573444917201071
14273081993166850387
17297538988880889355
9870724570810095
339635275824
101450287
2625321593698192308
91554312
3812049113439014303
492962512
15289397349632182266
342928503145892901
9257009393629660721
13674941621707869313
17952462371364276975
24314188
7676326001635166459
12622921449567619867
14471968401314024391
14418163
71762418
4522126
1873618497636273356
1873618523431177265
31304285008889193
2625321597996960522
42074479
18895601982637667
14883032307819284131
32178524
490930885
5459976661309458015
194314911
1873618454646032908
9386257314251803173
13950077918785243724
5831598146013367591
5882159627828332650
69730775
6100103913039400051
15744000533156660854
12386527
518587129
59834762
9231865831523354279
2490479
2148672331528407961
2908260051937332390
16876615841046071902
9950583114428779661
154731123
13237708539884666883
30458205708158447
2964529530791004471
40042856
2933734509745341832
5459976691403131036
1730675726
1873618484739705502
2676033351739245930
15215179494928287321
14866462842593414402
5463642917535614049
631243623
5885261859847867262
11391362031143292020
506659547
105251406
5778348197355914873
16324853745603185849
5509410163496651347
152699489
15292499534361856724
496763604
544211744
4078298792234977417
5461104782808057591
14648423506775771515
10504814416598927327
8709732826087622782
2544766567488424310
811139977
17088205463377873568
15798241638577276499
2676033356038014277
2785415326238639918
12562453432512743836
12350988444867431112
1873618514833377412
16940553195690134509
45875581
103219765
8854886168440079511
5941764153383128192
2625321589399162008
11818157132458100908
2785415278947600352
15257764832492062794
232154598652
819004351
16187661
4644563108626631009
4000515045253449269
16872667624306444468
1873618493337242815
6291625
6156737968247080128
292346005443
283116224
3220426554520570467
12356593998396393868
684134257893444250
17175427809786595961
9870724570547380
1992881803100621054
2625321593697930351
9450798976826149302
16655465042802838677
6474545510181176536
11740202404159819072
15289397349631921063
9714916620293637762
6098975770044401989
16364556117061994922
196084388
540148509
24052042
11065179658016983681
12480382642832672298
71500270
7285785859232107205
14156017
17632571483632043275
61604254
4259978
17750109864738752812
1873618523430913566
9830100417878166271
14425661002709010016
4794173760728861833
464308734399
510460641
2507605048
41812332
2679637056
99156525
16044698410491643447
9870724568517151
5516046735301085409
6261263733545503259
3759645248384009814
538116878
5779476232874035736
6104586261131037638
10531295842117158093
12124379
69468627
5565348505908348542
814941090
5299098870104394759
14322284629040564382
10440328872292254866
2228331
518324983
16872385650894636566
6284197438710222140
8098722631875955846
5727354392818878727
9870724566484489
154468975
2292825785040636736
3172873343893834792
14418466534433295118
2707725182771857350
15293345523383077603
259261111
19988781
15371922320578972378
19741625396299098
18411699893871247383
12818875419963886521
2676033351738984017
14268291611706526293
1309213649
104989258
6367324841362000185
7432602967203907143
11331649863678691999
15292499534361593441
1815413785
5778348223150556659
5572809636518234139
11408348231855703653
2446197814
13001682102565734253
17186370630874106258
2785415274648570354
14264783202905229777
7171706723174648069
820773835
4645667113710455153
16425638839461284611
5353476806987745228
1840738151924108521
6153071806601889790
810877831
8061124
5356297048398365877
4770547841029572913
12804866717273491655
15580874133512784221
514261733
571605843
12346762090311779845
102957618
10907429529076434052
2625321589398899121
5354604872597767596
4279174221
27638024484621167
8483828720841721486
1459422188
23689889426704296
17648172271756969893
232154335723
15925513
10811668319096800853
6365225478934037607
9763237054719266042
11633356565151157114
63373752
1873618493336979326
6029477
3580814869236944221
5199085482290645376
282854078
2625321593697668091
9870724570285675
7449919019336600171
1839046014815569788
23789896
9131616131521448314
5779476228575003910
5511666277521099409
13940760354079114484
18413109980183855178
644678512
71238122
417019463453
15131353489256221185
447360420122266222
520094464
3997830
15096032016463431129
1873618501936549084
61342108
1873618523430651633
18412263918078459945
5344573059048999857
5155859771100236117
5405598659939206416
27356033876298083
2146416200305806198
5303893093062347743
21758263
3189961199463959445
527958790
69206479
11862232
6364097396127827248
1320879066
365262179507571896
23689855034002659
1473119215
18412263948172132400
31243224015702806
39518566
9870724566222277
545719090
5301355009924597043
9391897706793274792
11514789185312918199
18411417886066737167
5299098848607995194
2284412389694637269
10530167802300925091
10427987387505837891
14322803714593785119
2625321585099869531
6829167367527204602
6013889919468112625
4181978486829943864
8698802578697685482
1654120425802828663
5569296748444387676
1873618441748940565
256967343
5245848947241584851
15862817677379702068
14633483086300318059
288046714075
2203332276215481610
7798976
810615685
237175467
11340219378265033230
313841615983
513999587
18413674004391067740
2116750858326574509
8070938101082033295
2625321589398637514
25099937047839912
5245848878456439955
12118995007347033900
4562124381333884039
31586327206235137
16436648502583690678
9181481831755875838
5516046752497929091
4183106466458307862
1991460714865167155
17082847207615301902
818480058
15663365
73007615
3701600990787603378
63111604
5767329
579208034
1493907215601306869
11535686880442518166
3313969578832561394
2704904932763174902
6570315963541227654
282591932
5726226297114658480
17160329975787685834
8843457619279611284
18413674034484740195
9870724570023121
492176077
30740204914083091
21433663625497129
1629160452
1873618450346477252
18412827972379344962
5243108696682924272
7260902865540482639
816448424
70975974
15287423196122254433
1873618501936285414
5151629580948802356
3735682
61079961
18411981910273949729
7837634943338155161
3597357340772992368
5133829485925763690
51184007
10956724774926813288
98632231
17309267256018536307
9870724567992379
29048106498198701
3544107379218385465
14386655907412249373
219257507157
21496117
68944331
16330874579771459902
11600084
11124082762859154482
5459935770830768809
814416800
347984565637089693
11923578915473263059
575144796
517800693
3297856681506178941
326737923180
16038494049632258844
15104099179857577674
32996413518841137
153944682
2152780467316001469
8722536002903082945
10646954815923686447
545456942
14458654042895551171
3935742187522887052
16064731596255856452
19464489
17648172288953812474
6213874949885069218
14851060135220743194
6471725260172231870
4504298175131421894
573113178
11701191021079496730
12314601354656483126
13957562954616997312
161809033
563217229
104464968
1366033375
1133620930477295468
6209141923583494372
2625321610894509848
5052785364214352114
6155298040667702671
5246977012853376412
4074350485214726972
27328854
1873618441748677997
2000487899013646903
7465404271946632160
7239351853821397993
11742834345080916462
6368045642961454306
5516046795487905107
434216307724
3493677603186412637
810353539
16633695840000739887
821147663836514852
18413391996586557524
7536828
4151361015346562251
14540810596246030644
5995296139937712949
159777405
8816997369364548341
45089144
18412545934481162291
9298403582666148514
15108492788614827244
35193182
5568582435113995179
5570988833963444820
15289397375428069113
15401217
8430474765433179073
10750398672578676906
72745468
5405598728725859379
9250794030848869727
62849456
17422075106091075868
5505181
1873618497637255436
578945889
13106160036035691955
282329787
5570988786672405753
9870724569761068
7031431794891230329
43057525
1706034183
491913932
214958474959
90505732
18412545964574834746
32432303330887118
846140170598090257
5458848587099997499
17607182983838566334
195297952
539362075
5460499872422693597
23265605
943759021439519007
70713826
816186278
2207492642904016905
644154222
60817815
806290300
3473534
1873618501936022824
13307798926833551183
1873618527730926929
11349795265056081195
567018319
9388513449772451585
165610142
2625321576501808484
7290339324003420579
15287141244205140113
41025899
9870724567730368
5569296739846327213
98370083
1531970550
219257244681
2065251783916127931
6151097665987347595
1407386597
3973490993339565383
12463417266756127924
17631161371525515669
21233971
3232498753
4767727591020628301
8972557000702888938
1873618458945784014
15290525376551717170
1559626750
68682184
12689613402799605860
527434500
517538547
3542979343701772038
447112610911
163578521
326737659857
30458205707109873
2625321606595479619
498702419026
555090760
11846037961957312985
2286775792223980496
2676819007
11599686562536949325
3968978683605551949
5831598103022077418
15175534989820758889
3812049126336301758
545194794
12348736218027264207
12743882002561631754
12318365723906541324
8882845388820581451
12769623874203027091
1732546160493595960
10430737389551487761
9512531412808567772
21433723812579518
812123024
9140909979694467183
4025048830681353606
1873618489039455401
18331530485106038
5516046791188875281
6156456003434055463
12474564753552836994
17621561863500597513
104202820
29612220986426501
1996555300
2625321610894247837
17489156252859434801
103179363763095696
15920335005095365860
13112992413209136128
2034107431
17291573824845253535
9772926989806013640
819987397
17170714
1873618467543321286
16156684754098128751
6925759830950740072
7274680
16161820259100396848
3698377064120454404
10296839827164892306
13913370016116443160
1363739614
92275213
210659444315
1784112314702629632
5461104765611674055
507299956084
13237708552781955078
197067432
4211147846
14657391675119111356
25035091
1735459858
15139069
14426056237756189706
12771845711499103316
9940375093616053431
6523880655054768550
62587308
10967349376607587326
1873618497636993704
15290807392954681807
5243033
1133620917580466754
1873618523431898109
11613165301442872555
282067642
9870724569498781
2141513421469058406
14318336791419094928
5885976069999102359
6153917830015027393
214958212644
548995910
90243587
16101055855214332856
9409295256684857617
539099930
30458248699119542
23003457
252379820
6173800107753209956
70451678
13107433
815924131
1873618476140856959
3188833133853148985
3211386
60555668
5514354727165429372
18430745393540238720
5566476498435442740
8821966780582857359
806028152
31022281504130688
15273884660262766886
17153706162049649384
15568274631689570656
98107936
9870724567468020
2625321602296449309
5250413516934940017
10377197347619277484
546964288
2429420595
68420036
13840095604897025041
11075790
1873618506234530930
517276402
31304293607146613
10225919150420460684
32714392818354350
163316374
17480593072628501093
3653991426073234491
28202143271093720
2625321606595217579
669516658
11075097734987253589
544932649
5248951136269502637
24535874148371011
5247593352907000017
13750803869111880047
821756878
5565348488711963913
18940198
23407778443822783
811860878
3910652327921846506
2372569380647405649
6151097721875664077
8481290603310483360
15289115311734721621
5197393238738928914
8858552325786961082
15270695523793439937
103940672
6206603741566403719
151388766
2531021385567766485
7563081637033018620
13044533461222491710
6154199872212897041
9126223058424237061
1160107295621122785
32714349826081871
6152225697206437786
4333982245204396969
7012532
5411521012994803182
5249797159683425776
570557265
17619527108083517000
3758799224970808644
11069796609748044689
210659181949
14926165161459649868
7570985824906512457
3234866947851553000
1906986264008723742
24772943
1873618446046923526
7516607870825792868
14876921
72221177
18411699906768535578
1495925747
62325160
288043895627
31304259214443724
3685635809078676834
4980885
313838798363
13951205954302051853
464309454125
7151957518376504179
6153353870293665804
365428606574
14319322726341872694
3493083035910933027
214957950334
13222096480399396057
22741311
538837783
12845285
1675756474409617568
7676326031729298383
1873618476140594617
70189530
2861086850442987769
12590629664748537952
15501473033754248808
1733166096
2949238
5833854255738587405
6405261049027955879
60293520
6364097417622914469
50397573
15289397310941170468
1436145094782551981
9870724567205432
155189878
7996312456522828750
2413828615876118471
1818166298
97845788
2625321602296187261
4451323549999957434
3544953467117898450
40501610
6364097443417820330
1543385872365455415
12606726616442537392
16436379939763522008
7562235540534921217
546702141
20709678
18413109962987470918
10939233345785957508
1384869222252743071
14383042897579063
245051624454
813630359
5881866613803452649
1455946274504313841
68157888
10813643
4502606072414800438
9388513432576593267
517014256
16739161091967945306
6203168539198949844
20305658202031811
15122676476569913436
48365955
5941764144784016877
12601357272775920269
5900805793554762144
163054228
6155327937823509637
95814162
2625321606594955469
544670501
11092808190891527547
6365225423046182853
3545799490531822688
5991347927496329957
2676033356038473537
6928358494714596151
18895516000586505
18413109993081143373
1317798870
3242943116712479419
8468495303965871404
10215782083327823122
295544243748734701
7536133444401891169
13880529192106527090
18412263930975748140
103678524
8816997365064994109
5513226652957347114
13427220419978791304
4279895118
2581508047683782932
151126621
16436648502584675667
5245789596497153220
18411417868870352907
1574831104
5512098613140196086
16646420
16881311723980129501
580191075
6750384
460010423829
17142588721119759321
5411521012994540776
13331692090551241408
2236213724530672835
10512763733196344280
91750922
493159123
210658919829
5353476789791099071
2973047420892220660
102615266471184862
817431474
71959029
14614773
29330157293667421
18411417898964025362
8854886129749066875
62063012
1631882651478526261
1873618497636468806
1626046306171619904
4718737
6971710725545264615
15463390673086056969
5996988225456246061
2625321597997156982
1258091056198584472
2365498112266798670
12258209558853782455
548471621
200191596416994196
5565348480113903112
10159392401199270768
538575636
5782296448490211725
15289115277341755866
12583138
4959080478982475006
4237766475632413481
2687090
60031373
11241814380293784908
18413674017288355935
10162787574158199843
5625593289148533238
605557034314828631
2625321602295925195
97583640
16546579671803956126
546439994
13513914891881875478
18412827955182960702
18142877345697171235
8716776878113885241
5991347923197297866
21715680028265805
5299098848608717979
2686971790050919863
10551496
2676033351739442523
5246976935469649046
4236074403011431549
5561348123192067576
516752111
13525196865559988902
451412624470
6813843502384089093
3452050537366752044
2723374776553770162
105448017
14284319595218536933
356832576945
1987904546
2789363555876800106
17063697102470777209
6584302816815089825
5727354422913010657
13944415416121166662
28311895
11906248855590275274
3707523343842937215
18412827985276633157
821232589
18415907
2676033356038210923
17257283880273643533
18331556279224644
9117971362513815455
18411981923171237924
309541536868
113312346
46072191
103416376
27920126869375123
160760449
361131345578
9234597529149245860
14835085562484362568
4585257123188181630
1413046597527538184
6208295874376239521
13217980679449939250
1966081057
6101795981361546864
16384272
10370417990725208293
4196703391028741586
6488236
63832509
5153885660580611393
6155045912821630127
5197393273132877515
2625321593698126810
10720606758114626648
9870724570745030
30740204914804024
91488775
7792373120121047026
3579577413
5458848587100981064
755605599842665887
17404805271631431757
417019921504
9386257335747873389
817169327
18413391979390173264
71696881
8328637003859953646
14665059300281706
6101796011455220816
4456589
13070886371126478108
8733200714257204941
10913926882465549337
29330183088310857
61800865
14949273699027977966
1873618523431110190
3573803894998305775
5569296709751605280
5835546375651263675
9870724568714358
42008942
1746899701160150410
9664889374910385451
7406761759861377295
2625321597996894992
365428082633
11888218815508973537
6311975551774360856
1408369638
6101795942670075923
15515140772745448064
27638058877519937
13361048879788721990
2430665780
22217020
538313489
927164962728314711
69665238
27638084672424186
2573543627316201844
12320990
2424942
18413392009483845719
3660444556051220001
18412545947378450486
154665586
9870724566681132
546177847
2229804632046437624
5245848917148372136
15906307047154976446
827351178595273968
5780604350074062990
6350640494756627870
9198943117821938833
2676033351739180486
1192315303887243384
67633599
6205475723246636047
17419818910382754661
162529937
17083693235326683482
105185869
8912366315847026281
5249797202674912471
2446394423
1461650414
257426098
17299513133793348673
4451048243670025981
14597841535548131734
14130457194541352666
15290525359355331959
9195012299735698785
524354306
429916226796
6153353788611431303
1728578573
6153071806602085789
2676033356037948725
8257735
2785415326238575484
1873618489038408278
8072726556923202784
7731878007432940921
16271603835638319461
11229884474259868248
5835546388547569431
2704904949958969710
103154228
2625321589399096275
6887529782530082437
45810044
16365628939578247566
4408861808311732424
3554388240579364748
3431353251379022211
4131548706499659810
3229097897723824621
818938814
16122124
10831084194895235709
6226088
6366071472254485645
10441809166173275876
9538952396691934382
5994450030541998229
6835382734606174906
4397798273518472097
2625321593697864817
9870724570481756
17782439637510195701
31304332299601191
4074350515307087985
10758418391935682553
11405246090117384413
196018851
17943317531894613402
15289397375426759758
1801651221
12716605781588708278
5353476789790574588
1873618450346936800
14462121002204464918
2785415309041207732
71434733
10770155859627543824
1873618476141841211
5780604362970367638
2530739313276357975
14090480
5567604589840172352
296644709200
11266915032714840583
4194441
2200512120787569683
2549492329236335496
6211116016906930204
99090988
9625506809262378259
13237708535585570818
490103571663
14541340640523322842
9870724568450966
1793158821936040552
9486667438472824267
21954873
538051341
1398211555
5408700909154273182
5356297014005859746
8444237263823374707
69403090
2599235317101562153
15897859265386515143
6097847713031849822
2162794
9796067026192895123
13117159209037203716
164299420
17088031212435737557
8099682237308012832
8971880411373045432
3099205763721988894
9870724566418979
545915701
13237708565679243273
4449074137450482853
18115860927276518423
5247593352907982888
16533468055605152863
1873618458944474091
19923244
3188833116656765520
2676033351738918494
4501477955215362649
17621268784989013395
14581169549127125939
6206321707968234614
33278352538406314
516227820
6890349946557761313
1411918553413126104
162267790
2474797953316292924
1694703987789596868
18172096623373846790
28766090095429261
1223976979390989739
3221822110943152678
104923721
15185362616787929146
10003084053115964048
2625321585100065781
437798118096833445
1815348248
31304323701802109
152371807
14046027923586223423
2021331689141374237
20869691006257762
13044533461223476582
16778219695595128445
12057002331826554305
17465760298758178660
7576852735584046364
129168850403198609
820708298
17891616
1873618489038145001
7995587
11911353550167017696
4522983015860209939
12612941966326959190
102892081
2625321589398833886
45547899
11548493110908749415
4076606693818764590
7851156332894489575
12779163922391107832
5991347884505304103
1095239150174145285
3863606920688567965
10771469979967884371
15859976
14312864964518020808
17245750799710423012
5963940
10655291933708585535
4162099616697747321
63308215
1873618519131818153
30176189305784773
53412232
318140582948
15611911946388048179
12640696470018459947
30176223702288623
9870724570219682
33278412725750974
1409876968
28766150282773591
1873618450346674286
15290243360148359553
14036340911856223966
6365225461738636619
816645035
417019398489
6206321673575531611
12057284352529139627
71172585
13828334
7528870385169533979
5832726134240118664
2785415334835848520
2572415553107265488
61276571
3932293
9870724568188981
1873618549225491555
2360543918673038210
98828841
12512221777814685432
17939922315943150958
6045857707735386835
21692726
4502324038816629924
11490081257974859839
17639632887023929831
1316357237551401394
6101795994259359091
11796695
69140942
18411699889572151318
12074216556992400767
1320813529
8618954206934993224
164037275
4160546838840674266
12591757708863407913
555549513
9870724566156739
154141293
32714414313178248
545653553
223556471268
12613788024133322735
812581780
5778348150066318224
1500709877
6741138607599781046
9227353569080969220
515965674
13884327378110449525
18411699919665823773
16340493341965880015
162005644
620757861
21997756618049241
17007720368052373541
13001845694847518363
227855238971
17629469
1737950228
9288263741171697848
20305615210743190
1873618489037883086
18613533990193666
7733439
313841551493
15288551330518206781
17302333254828493968
6153071832396467338
2979056014524680527
8857706336766199103
2625321589398571980
45285754
5991347884505041337
4502324004423927097
16874702537456224943
14911447610171655366
13944990587222231178
3308118261903721908
18413109975884759113
8412057600244518110
15597828
2538734651
818414521
17082847207615236134
18276979644936029994
5701792
63046067
5882159696614657105
1410790466305853323
18412263913779363880
32714379920475611
539325825270679628
1873618519131556994
13536993689470216
9870724569957729
43254135
5153885686374731086
9387385384162626351
8336200085500660803
5303047104041388600
5512098595943810546
5717788221838658971
2324121364801391676
12012735189037878155
2192639020
1873618476141316771
70910437
3670145
2219404100148201532
2544580112253650683
61014424
6155045921420412650
18412263943873036335
1873618549225229533
9870724567926898
98566694
29894215892535509
155910777
6366353527348399255
9956242218935388443
31586340104504804
219257441372
13522668389390157414
18411417881767641102
11534547
279448847671
7242736046355514492
68878794
814351263
1192315299587689576
2524775482
34124461934314600
507839197
5539270545646881104
4974759074281293673
5337229686545450161
153879145
12644080653952551280
30458205707308380
545391405
17877509356004052233
17520266449292560845
11065487246536017596
2011949215506761725
6155045882728942511
812319634
1130753852548581517
573047641
5299098874402571932
18413674000091971675
18331556280207363
17269866578628118199
15289397293744523027
161743496
10649664295314066054
6051485356288903427
4347925833116091776
30458188511970924
104399431
10184384893691038634
7401639761433855789
1308623824
563151692
2625321610894444316
7239069803025663720
11434534198373320614
1873618441748613384
5622264654903379074
29330122899915877
15636380174699072146
820184006
2597848126
10233694917695638297
14585410861575638263
7471291
85348920764927349
6366353492955694732
18413674030185644130
4127600472562141528
35127645
5780604337176709161
541328159
2524806001290315567
13850612818404510827
18412827968080248897
15335680
3493395603981665996
17858552114457937219
62783919
3875793754648151904
5564423899624572258
292345154665
3489447322753895731
18411981905974853664
5439644
42991988
9870724569695611
12269921124804135698
559088458
33278386930321618
15289397353931868100
214958409445
6219166245997316001
15289397379726773461
30458248699315998
23200068
12163381674616883890
70648289
9000175594581527004
806224763
89657146100418951
15475002888547338265
3407997
60752278
18411981936068526119
14267039342724252928
13726068525522684375
1873618527730862181
4504298213822565083
155648632
98304546
9870724567665640
13681696359428851594
219257178788
24535844054893958
50011031689890353
10532987940533372886
11272401
23407795639356361
68616647
814089116
15635925519041823968
1998521381
163512984
797977540607610221
32150286927595340
4709060078846741586
5967447917778832244
5885976078596834724
2625321606595414132
153616999
1744643526947965735
17461812017531651650
987047180239768912
30740239306197230
15288833278135765839
525337347
5885976155981547843
18413391992287461459
10532987970627045461
56689033
5722409915131627177
114033243
10159956468397444373
18412545930182066226
5349367342193968413
13819010092172884
104137283
17953636526298302297
2224234517276395067
2789363555875490728
2625321610894182276
12426051065400527122
9355193091131312182
30740222110861163
14361095630442006439
3137288237381257087
17105177
819921860
7209143
1727529996
810025856
805679481429165719
17298949057997047589
21997713627284659
16120716880803858984
33560368941433940
1535706104
10229733804179524009
18412545960275738681
9714916620294556051
4078298775038527628
5461104765611607541
210659378559
92209676
13418544886826534789
14264208172476401284
1917322269
197001895
24969554
5405598728725530322
15073532
817890229
72417787
1873618471842024407
17091318705916150977
5946696443085589628
5177496
5847102830955857465
62521771
1873618523431831649
5835546371351184527
14824583848163281869
42729843
9870724569433729
5780604315680310424
16385074671182940805
214958147231
3007753865419557454
491586249
17943317531893566468
1801912319444323213
22937920
539034393
27356055371580547
1873618476140792146
5198803303557629187
6103488088376871190
13041896
1733362705
70386141
2306802734
643826540
3145849
14637903957824965363
519242494
60490131
805962615
5784522635265967958
1873618527730601376
18301216972082383618
11644189250161151139
2625321602296383846
9870724567402585
98042399
15741861301866530650
494403323033
6729754102968812754
546898751
6208295835683456476
33560403333875446
14409153078548760239
15530271666638163275
1873618458945456185
16951650337051970851
5144036663261072615
813826970
12133908888583014197
68354499
11010253
279448324634
14749580058850363919
6633286351216577743
2089265852158774334
8929038315166239946
31586271318836879
13678484518713821516
105906772
96010773
2625321606595152102
153354852
10831360821402142464
5652457623480305518
8503320935775669540
16483453074211931840
363084051790629688
544867112
258146996
5944020284604679310
5782296431293302176
28484176870181368
23407778443758207
3973491023432910866
5778348175860436286
1873618514834032208
5438906422044199526
103875135
7697026996393675938
1709507593
161219206
13237708548482859013
3701601059573925529
879419277503368073
3822179681402096264
5565348445721659362
532291916112267238
256115374
1460339693
13351948495571782591
14665351642484132
3008657884776564221
2341393787733871788
16904712944497920326
3967850626592737364
16843031
4131548702199581670
6946995
809763710
1928986057181235415
11964228788262537512
2989761681675848960
1873618519132801026
7276444624641068235
5994450030542718433
12284124821458521275
111739480
4076606646528706921
13650504529854072320
15804734059994287439
14425661019905001872
2395604016
14465116522071263669
210659116497
15290243360149343057
15777957523720635747
10167863869407233224
18331517588211470
12884708026702235763
14811384
72155640
7042731044489660311
15288269305517836796
5675796551176948530
14264208198271043974
1495860210
5787083718919720300
25099894056749168
683965395648908415
62259623
4915348
12974919760129952993
6155045917120857525
1873618523431569790
9013091190501541709
4392112055939237960
2625321597997353452
15897908900500866947
6177363174264606048
15872788267758849077
491324104
33560399034844286
22675774
17542946455516547053
2431124533
538772246
27920040887322186
8704274751914773568
12085352355710699032
6153353775713551670
70123993
27356081166223293
7885152524183078888
60227983
2883701
11700344903086704893
7329667560521271617
518980348
5833854255738521265
8618954206935976415
3901910077209972079
1713308683
1992881785903908578
4530582984922301900
16130159995999161574
155124341
2625321602296121720
1884114794138700522
5778348218852443426
97780251
4240022615453076686
6097847786116483627
6361518319333476776
30540122
28484146776247610
546636604
5741055947585816645
6100103891543657570
8807886331112851129
813564822
10223260478367337870
746324852
15287423226215073909
11226550812567014265
1491796976
8097653480026868144
5995296157134227520
1873618532029106835
1539245050
48300418
331037869860
95748625
6314795724398267312
5888081980883929307
544604964
34124418943289166
5245848947242502849
32432363517642192
2676033356038407648
811533196
1317733333
8920676095134336910
17149817495305717193
918014392040164136
103612987
8695136395555507435
18349504802666319185
14847634415788362123
1584661506
4287350266942457603
525512494730316455
5881302580997523790
1574765567
3784125305237867347
819397570
8326286517935867839
16149105318148965958
16580883
6684847
18411699902469439513
11229983338076703492
15292499491369977714
339635406848
9870724570940976
100
101
102
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests/data
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/data/text/chess.pgn
|
[Event "F/S Return Match"]
[Site "Belgrade, Serbia JUG"]
[Date "1992.11.04"]
[Round "29"]
[White "Fischer, Robert J."]
[Black "Spassky, Boris V."]
[Result "1/2-1/2"]
1. e4 e5 2. Nf3 Nc6 3. Bb5 a6 {This opening is called the Ruy Lopez.}
4. Ba4 Nf6 5. O-O Be7 6. Re1 b5 7. Bb3 d6 8. c3 O-O 9. h3 Nb8 10. d4 Nbd7
11. c4 c6 12. cxb5 axb5 13. Nc3 Bb7 14. Bg5 b4 15. Nb1 h6 16. Bh4 c5 17. dxe5
Nxe4 18. Bxe7 Qxe7 19. exd6 Qf6 20. Nbd2 Nxd6 21. Nc4 Nxc4 22. Bxc4 Nb6
23. Ne5 Rae8 24. Bxf7+ Rxf7 25. Nxf7 Rxe1+ 26. Qxe1 Kxf7 27. Qe3 Qg5 28. Qxg5
hxg5 29. b3 Ke6 30. a3 Kd6 31. axb4 cxb4 32. Ra5 Nd5 33. f3 Bc8 34. Kf2 Bf5
35. Ra7 g6 36. Ra6+ Kc5 37. Ke1 Nf4 38. g3 Nxh3 39. Kd2 Kb5 40. Rd6 Kc5 41. Ra6
Nf2 42. g4 Bd3 43. Re6 1/2-1/2
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/structs/test_struct_methods.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/options/test_options.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/text/test_text_methods.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
import random
import string
import numpy as np
import pytest
import cudf
from cudf.core.byte_pair_encoding import BytePairEncoder
from cudf.core.tokenize_vocabulary import TokenizeVocabulary
from cudf.testing._utils import assert_eq
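# Whitespace tokenization should explode each string into its tokens, with the
# result index repeating the source row index once per token; null and empty
# rows contribute no tokens.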
def test_tokenize():
strings = cudf.Series(
[
"the quick fox jumped over the lazy dog",
"the siamésé cat jumped under the sofa",
None,
"",
]
)
expected_values = [
"the",
"quick",
"fox",
"jumped",
"over",
"the",
"lazy",
"dog",
"the",
"siamésé",
"cat",
"jumped",
"under",
"the",
"sofa",
]
expected_index = strings.index.repeat(strings.str.token_count())
expected = cudf.Series(expected_values, index=expected_index)
actual = strings.str.tokenize()
assert type(expected) == type(actual)
assert_eq(expected, actual)
def test_tokenize_delimiter():
strings = cudf.Series(
[
"the quick fox jumped over the lazy dog",
"the siamésé cat jumped under the sofa",
None,
"",
]
)
expected_values = [
"the quick f",
"x jumped ",
"ver the lazy d",
"g",
"the siamésé cat jumped under the s",
"fa",
]
expected_index = strings.index.repeat(strings.str.token_count("o"))
expected = cudf.Series(expected_values, index=expected_index)
actual = strings.str.tokenize(delimiter="o")
assert type(expected) == type(actual)
assert_eq(expected, actual)
def test_detokenize():
strings = cudf.Series(
[
"the",
"quick",
"fox",
"jumped",
"over",
"the",
"lazy",
"dog",
"the",
"siamésé",
"cat",
"jumped",
"under",
"the",
"sofa",
]
)
indices = cudf.Series([0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3])
actual = strings.str.detokenize(indices)
expected = cudf.Series(
[
"the quick fox",
"jumped over",
"the lazy dog",
"the siamésé cat jumped under the sofa",
]
)
assert type(expected) == type(actual)
assert_eq(expected, actual)
indices = cudf.Series(
[4, 0, 0, 0, 0, 4, 1, 1, 4, 2, 2, 2, 2, 4, 3], dtype=np.int8
)
actual = strings.str.detokenize(indices, "+")
expected = cudf.Series(
[
"quick+fox+jumped+over",
"lazy+dog",
"siamésé+cat+jumped+under",
"sofa",
"the+the+the+the",
]
)
assert type(expected) == type(actual)
assert_eq(expected, actual)
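# token_count with different delimiter arguments: a single character, a list of
# characters, or the default whitespace splitting; null and empty rows count as
# zero tokens.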
@pytest.mark.parametrize(
"delimiter, expected_token_counts",
[
("", [10, 9, 0, 0, 5]),
("o", [6, 3, 0, 0, 1]),
(["a", "e", "i", "o", "u"], [13, 13, 0, 0, 6]),
(["a", "e", "i", "o"], [12, 11, 0, 0, 6]),
],
)
def test_token_count(delimiter, expected_token_counts):
strings = cudf.Series(
[
"the quick brown fox jumped over the lazy brown dog",
"the sable siamésé cat jumped under the brown sofa",
None,
"",
"test_str\x01test_str\x02test_str\x03test_str\x04test_str\x05",
]
)
expected = cudf.Series(expected_token_counts)
actual = strings.str.token_count(delimiter)
assert type(expected) == type(actual)
assert_eq(expected, actual, check_dtype=False)
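# TokenizeVocabulary maps each token to its row position in the vocabulary
# Series; tokens missing from the vocabulary receive default_id.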
@pytest.mark.parametrize(
"delimiter, input, default_id, results",
[
(
"",
"the quick brown fox jumps over the lazy brown dog",
99,
[0, 1, 2, 3, 4, 5, 0, 99, 2, 6],
),
(
" ",
" the sable siamésé cat jumps under the brown sofa ",
-1,
[0, 7, 8, 9, 4, 10, 0, 2, 11],
),
(
"_",
"the_quick_brown_fox_jumped__over_the_lazy_brown_dog",
-99,
[0, 1, 2, 3, -99, 5, 0, -99, 2, 6],
),
],
)
def test_tokenize_with_vocabulary(delimiter, input, default_id, results):
vocabulary = cudf.Series(
[
"the",
"quick",
"brown",
"fox",
"jumps",
"over",
"dog",
"sable",
"siamésé",
"cat",
"under",
"sofa",
]
)
tokenizer = TokenizeVocabulary(vocabulary)
strings = cudf.Series([input, None, "", input])
expected = cudf.Series(
[
cudf.Series(results, dtype=np.int32),
None,
cudf.Series([], dtype=np.int32),
cudf.Series(results, dtype=np.int32),
]
)
actual = tokenizer.tokenize(strings, delimiter, default_id)
assert type(expected) == type(actual)
assert_eq(expected, actual)
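# normalize_spaces collapses runs of whitespace (tabs, form feeds, repeated
# spaces) into single spaces and strips leading/trailing whitespace.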
def test_normalize_spaces():
strings = cudf.Series(
[
" the\t quick fox jumped over the lazy dog",
"the siamésé cat\f jumped\t\tunder the sofa ",
None,
"",
]
)
expected = cudf.Series(
[
"the quick fox jumped over the lazy dog",
"the siamésé cat jumped under the sofa",
None,
"",
]
)
actual = strings.str.normalize_spaces()
assert type(expected) == type(actual)
assert_eq(expected, actual)
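# normalize_characters pads CJK characters and punctuation with spaces and, by
# default, lower-cases and strips accents; do_lower=False keeps the original
# case and accents.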
def test_normalize_characters():
strings = cudf.Series(
["乾 \t 乿", "ĂĆCĖÑTÜATE", "âscénd, Descend", "", None, "Stock^ $1"]
)
expected = cudf.Series(
[
" 乾 乿 ",
"accentuate",
"ascend , descend",
"",
None,
"stock ^ $ 1",
]
)
actual = strings.str.normalize_characters()
assert type(expected) == type(actual)
assert_eq(expected, actual)
expected = cudf.Series(
[
" 乾 乿 ",
"ĂĆCĖÑTÜATE",
"âscénd , Descend",
"",
None,
"Stock ^ $ 1",
]
)
actual = strings.str.normalize_characters(do_lower=False)
assert type(expected) == type(actual)
assert_eq(expected, actual)
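# ngrams joins each run of n consecutive tokens from the Series with the given
# separator.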
@pytest.mark.parametrize(
"n, separator, expected_values",
[
(
2,
"_",
[
"this_is",
"is_my",
"my_favorite",
"favorite_book",
"book_on",
"on_my",
"my_bookshelf",
],
),
(
3,
"-",
[
"this-is-my",
"is-my-favorite",
"my-favorite-book",
"favorite-book-on",
"book-on-my",
"on-my-bookshelf",
],
),
],
)
def test_ngrams(n, separator, expected_values):
strings = cudf.Series(
["this", "is", "my", "favorite", "book", "on", "my", "bookshelf"]
)
expected = cudf.Series(expected_values)
actual = strings.str.ngrams(n=n, separator=separator)
assert type(expected) == type(actual)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"n, expected_values, expected_index, as_list",
[
(
2,
[
"th",
"hi",
"is",
"is",
"my",
"bo",
"oo",
"ok",
"he",
"er",
"re",
cudf.NA,
],
[1, 1, 1, 2, 3, 4, 4, 4, 5, 5, 5, 6],
False,
),
(
3,
[
"thi",
"his",
cudf.NA,
cudf.NA,
"boo",
"ook",
"her",
"ere",
cudf.NA,
],
[1, 1, 2, 3, 4, 4, 5, 5, 6],
False,
),
(
3,
[["thi", "his"], [], [], ["boo", "ook"], ["her", "ere"], []],
[1, 2, 3, 4, 5, 6],
True,
),
],
)
def test_character_ngrams(n, expected_values, expected_index, as_list):
strings = cudf.Series(
["this", "is", "my", "book", "here", ""], index=[1, 2, 3, 4, 5, 6]
)
expected = cudf.Series(expected_values, index=expected_index)
actual = strings.str.character_ngrams(n=n, as_list=as_list)
assert type(expected) == type(actual)
assert_eq(expected, actual)
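# hash_character_ngrams returns uint32 hashes of each string's character
# ngrams, either grouped per row or exploded into a flat Series.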
def test_hash_character_ngrams():
strings = cudf.Series(["abcdefg", "stuvwxyz"])
expected = cudf.Series(
[
cudf.Series([3902511862, 570445242, 4202475763], dtype=np.uint32),
cudf.Series(
[556054766, 3166857694, 3760633458, 192452857], dtype=np.uint32
),
]
)
actual = strings.str.hash_character_ngrams(5, True)
assert type(expected) == type(actual)
assert_eq(expected, actual)
actual = strings.str.hash_character_ngrams(5)
expected = expected.explode()
assert type(expected) == type(actual)
assert_eq(expected, actual)
@pytest.mark.parametrize(
"n, separator, expected_values",
[
(
2,
"_",
[
"this_is",
"is_my",
"my_favorite",
"book_on",
"on_my",
"my_bookshelf",
],
),
(
3,
"-",
["this-is-my", "is-my-favorite", "book-on-my", "on-my-bookshelf"],
),
],
)
def test_ngrams_tokenize(n, separator, expected_values):
strings = cudf.Series(["this is my favorite", "book on my bookshelf"])
expected = cudf.Series(expected_values)
actual = strings.str.ngrams_tokenize(n=n, separator=separator)
assert type(expected) == type(actual)
assert_eq(expected, actual)
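# character_tokenize explodes every string into its individual characters,
# including multi-byte ones; null rows contribute nothing and the source index
# is repeated once per character.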
def test_character_tokenize_series():
sr = cudf.Series(
[
"hello world",
"sdf",
None,
(
"goodbye, one-two:three~four+five_six@sev"
"en#eight^nine heŒŽ‘•™œ$µ¾ŤƠé DŽ"
),
]
)
expected_values = [
"h",
"e",
"l",
"l",
"o",
" ",
"w",
"o",
"r",
"l",
"d",
"s",
"d",
"f",
"g",
"o",
"o",
"d",
"b",
"y",
"e",
",",
" ",
"o",
"n",
"e",
"-",
"t",
"w",
"o",
":",
"t",
"h",
"r",
"e",
"e",
"~",
"f",
"o",
"u",
"r",
"+",
"f",
"i",
"v",
"e",
"_",
"s",
"i",
"x",
"@",
"s",
"e",
"v",
"e",
"n",
"#",
"e",
"i",
"g",
"h",
"t",
"^",
"n",
"i",
"n",
"e",
" ",
"h",
"e",
"Œ",
"Ž",
"‘",
"•",
"™",
"œ",
"$",
"µ",
"¾",
"Ť",
"Ơ",
"é",
" ",
"DŽ",
]
expected_index = sr.index.repeat(sr.str.len().fillna(0))
expected = cudf.Series(expected_values, index=expected_index)
actual = sr.str.character_tokenize()
assert_eq(expected, actual)
sr = cudf.Series([""])
expected = cudf.Series([], dtype="object")
actual = sr.str.character_tokenize()
assert_eq(expected, actual)
sr = cudf.Series(["a"])
expected = cudf.Series(["a"])
actual = sr.str.character_tokenize()
assert_eq(expected, actual)
def test_character_tokenize_index():
sr = cudf.core.index.as_index(
[
"hello world",
"sdf",
None,
(
"goodbye, one-two:three~four+five_six@sev"
"en#eight^nine heŒŽ‘•™œ$µ¾ŤƠé DŽ"
),
]
)
expected = cudf.core.index.as_index(
[
"h",
"e",
"l",
"l",
"o",
" ",
"w",
"o",
"r",
"l",
"d",
"s",
"d",
"f",
"g",
"o",
"o",
"d",
"b",
"y",
"e",
",",
" ",
"o",
"n",
"e",
"-",
"t",
"w",
"o",
":",
"t",
"h",
"r",
"e",
"e",
"~",
"f",
"o",
"u",
"r",
"+",
"f",
"i",
"v",
"e",
"_",
"s",
"i",
"x",
"@",
"s",
"e",
"v",
"e",
"n",
"#",
"e",
"i",
"g",
"h",
"t",
"^",
"n",
"i",
"n",
"e",
" ",
"h",
"e",
"Œ",
"Ž",
"‘",
"•",
"™",
"œ",
"$",
"µ",
"¾",
"Ť",
"Ơ",
"é",
" ",
"DŽ",
]
)
actual = sr.str.character_tokenize()
assert_eq(expected, actual)
sr = cudf.Index([""])
expected = cudf.Index([], dtype="object")
actual = sr.str.character_tokenize()
assert_eq(expected, actual)
sr = cudf.core.index.as_index(["a"])
expected = cudf.core.index.as_index(["a"])
actual = sr.str.character_tokenize()
assert_eq(expected, actual)
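# replace_tokens substitutes whole tokens only: targets can be replaced
# one-to-one by a Series of replacements or all by a single string, optionally
# using a custom delimiter.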
def test_text_replace_tokens():
sr = cudf.Series(["this is me", "theme music", ""])
targets = cudf.Series(["is", "me"])
expected = cudf.Series(["this _ _", "theme music", ""])
actual = sr.str.replace_tokens(targets, "_")
assert_eq(expected, actual)
replacements = cudf.Series(["IS", "ME"])
expected = cudf.Series(["this IS ME", "theme music", ""])
actual = sr.str.replace_tokens(targets, replacements)
assert_eq(expected, actual)
sr = cudf.Series(
[
"this is a small text ☕",
"this \t\t is ; ; - + a looooooooooonnnnnnnggggggg text \n\t",
"emptyme",
],
)
targets = cudf.Series(
["a", "☕", "\t", "looooooooooonnnnnnnggggggg", "emptyme"]
)
replacements = cudf.Series(["the", "🚒", "🚒🚒🚒🚒", "🔥🔥", ""])
expected = cudf.Series(
[
"this is the small text 🚒",
"this \t\t is ; ; - + the 🔥🔥 text \n\t",
"",
]
)
actual = sr.str.replace_tokens(targets, replacements)
assert_eq(expected, actual)
sr = cudf.Series(
["All-we-need;is;🔥", "\tall-we-need0is;🌊", "all;we:need+is;🌬"]
)
targets = cudf.Series(["🌬", "🔥", "🌊"])
replacements = "🚰"
expected = cudf.Series(
["All-we-need;is;🚰", "\tall-we-need0is;🚰", "all;we:need+is;🚰"]
)
actual = sr.str.replace_tokens(targets, replacements, delimiter=";")
assert_eq(expected, actual)
assert_eq(sr, sr.str.replace_tokens(targets, replacements))
assert_eq(sr, sr.str.replace_tokens([""], [""]))
def test_text_replace_tokens_error_cases():
sr = cudf.Series(["this is me", "theme music", ""])
with pytest.raises(
TypeError,
match="targets should be an array-like or a Series object, "
"found <class 'str'>",
):
sr.str.replace_tokens("me", ["a"])
with pytest.raises(
ValueError,
match="targets and replacements should be same size"
" sequences unless replacements is a string.",
):
sr.str.replace_tokens(["a"], ["me", "ki"])
with pytest.raises(
TypeError,
match="replacements should be an str, array-like or Series object,"
" found <class 'set'>",
):
sr.str.replace_tokens(["a"], {"s"})
with pytest.raises(
TypeError,
match="Type of delimiter should be a string, found <class 'list'>",
):
sr.str.replace_tokens(["a"], ["s"], delimiter=["a", "b"])
def test_text_filter_tokens():
sr = cudf.Series(["the quick brown fox jumped", "over the lazy dog", ""])
expected = cudf.Series([" quick brown jumped", " ", ""])
actual = sr.str.filter_tokens(5)
assert_eq(expected, actual)
expected = cudf.Series(["🔥 quick brown 🔥 jumped", "🔥 🔥 🔥 🔥", ""])
actual = sr.str.filter_tokens(5, "🔥")
assert_eq(expected, actual)
sr = cudf.Series(
["All-we-need;is;🔥", "\tall-we-need0is;🌊", "all;we:need+is;🌬"]
)
expected = cudf.Series(
["All-we-need;is;--", "\tall-we-need0is;--", "all;we:need+is;--"]
)
actual = sr.str.filter_tokens(2, "--", ";")
assert_eq(expected, actual)
assert_eq(sr, sr.str.filter_tokens(1))
def test_text_filter_tokens_error_cases():
sr = cudf.Series(["abc", "def", ""])
with pytest.raises(
TypeError,
match="Type of replacement should be a string, found <class 'list'>",
):
sr.str.filter_tokens(3, replacement=["a", "b"])
with pytest.raises(
TypeError,
match="Type of delimiter should be a string, found <class 'list'>",
):
sr.str.filter_tokens(3, delimiter=["a", "b"])
def test_edit_distance():
sr = cudf.Series(["kitten", "saturday", "address", "book"])
tg = cudf.Series(["sitting", "sunday", "addressee", "back"])
expected = cudf.Series([3, 3, 2, 2], dtype=np.int32)
actual = sr.str.edit_distance(tg)
assert_eq(expected, actual)
expected = cudf.Series([0, 7, 6, 6], dtype=np.int32)
actual = sr.str.edit_distance("kitten")
assert_eq(expected, actual)
def test_edit_distance_matrix():
    # normal case: pairwise edit distances between every pair of rows
sr = cudf.Series(["rounded", "bounded", "bounce", "trounce", "ounce"])
expected = cudf.Series(
[
[0, 1, 3, 3, 3],
[1, 0, 2, 4, 3],
[3, 2, 0, 2, 1],
[3, 4, 2, 0, 2],
[3, 3, 1, 2, 0],
]
)
got = sr.str.edit_distance_matrix()
assert_eq(expected, got, check_dtype=False)
# 1-row series
sr2 = cudf.Series(["x"])
with pytest.raises(ValueError, match="Require size >= 2"):
sr2.str.edit_distance_matrix()
# null rows
sr3 = cudf.Series(["rounded", None, "bounce", "trounce", "ounce"])
with pytest.raises(ValueError, match="Cannot compute"):
sr3.str.edit_distance_matrix()
def test_porter_stemmer_measure():
strings = cudf.Series(
[
"tr",
"ee",
"tree",
"y",
"by",
"trouble",
"oats",
"trees",
"ivy",
"troubles",
"private",
"oaten",
"orrery",
None,
"",
]
)
expected = cudf.Series(
[0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, None, 0], dtype=np.int32
)
actual = strings.str.porter_stemmer_measure()
assert type(expected) == type(actual)
assert_eq(expected, actual)
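# is_vowel / is_consonant test the character at a fixed position, or at
# per-row positions when given an index Series; null rows remain null.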
def test_is_vowel_consonant():
strings = cudf.Series(
["tr", "ee", "tree", "y", "by", "oats", "ivy", "orrery", None, ""]
)
expected = cudf.Series(
[False, False, True, False, False, False, True, False, None, False]
)
actual = strings.str.is_vowel(2)
assert type(expected) == type(actual)
assert_eq(expected, actual)
expected = cudf.Series(
[True, False, True, False, False, False, True, True, None, False]
)
actual = strings.str.is_consonant(1)
assert type(expected) == type(actual)
assert_eq(expected, actual)
indices = cudf.Series([2, 1, 0, 0, 1, 2, 0, 3, 0, 0])
expected = cudf.Series(
[False, True, False, False, True, False, True, True, None, False]
)
actual = strings.str.is_vowel(indices)
assert type(expected) == type(actual)
assert_eq(expected, actual)
expected = cudf.Series(
[False, False, True, True, False, True, False, False, None, False]
)
actual = strings.str.is_consonant(indices)
assert type(expected) == type(actual)
assert_eq(expected, actual)
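# minhash and minhash64 produce one hash value per seed for each string
# (uint32 and uint64 respectively); the seed Series dtype must match the
# variant being called.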
def test_minhash():
strings = cudf.Series(["this is my", "favorite book", None, ""])
expected = cudf.Series(
[
cudf.Series([21141582], dtype=np.uint32),
cudf.Series([962346254], dtype=np.uint32),
None,
cudf.Series([0], dtype=np.uint32),
]
)
actual = strings.str.minhash()
assert_eq(expected, actual)
seeds = cudf.Series([0, 1, 2], dtype=np.uint32)
expected = cudf.Series(
[
cudf.Series([1305480167, 668155704, 34311509], dtype=np.uint32),
cudf.Series([32665384, 3470118, 363147162], dtype=np.uint32),
None,
cudf.Series([0, 0, 0], dtype=np.uint32),
]
)
actual = strings.str.minhash(seeds=seeds, width=5)
assert_eq(expected, actual)
expected = cudf.Series(
[
cudf.Series([3232308021562742685], dtype=np.uint64),
cudf.Series([23008204270530356], dtype=np.uint64),
None,
cudf.Series([0], dtype=np.uint64),
]
)
actual = strings.str.minhash64()
assert_eq(expected, actual)
seeds = cudf.Series([0, 1, 2], dtype=np.uint64)
expected = cudf.Series(
[
cudf.Series(
[7082801294247314046, 185949556058924788, 167570629329462454],
dtype=np.uint64,
),
cudf.Series(
[382665377781028452, 86243762733551437, 7688750597953083512],
dtype=np.uint64,
),
None,
cudf.Series([0, 0, 0], dtype=np.uint64),
]
)
actual = strings.str.minhash64(seeds=seeds, width=5)
assert_eq(expected, actual)
# test wrong seed types
with pytest.raises(ValueError):
strings.str.minhash(seeds="a")
with pytest.raises(ValueError):
seeds = cudf.Series([0, 1, 2], dtype=np.int32)
strings.str.minhash(seeds=seeds)
with pytest.raises(ValueError):
seeds = cudf.Series([0, 1, 2], dtype=np.uint32)
strings.str.minhash64(seeds=seeds)
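# jaccard_index compares character-ngram sets of the given width between two
# Series of equal length; the measure is symmetric and a width below 2 is
# rejected.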
def test_jaccard_index():
str1 = cudf.Series(["the brown dog", "jumped about"])
str2 = cudf.Series(["the black cat", "jumped around"])
expected = cudf.Series([0.058824, 0.307692], dtype=np.float32)
actual = str1.str.jaccard_index(str2, 5)
assert_eq(expected, actual)
actual = str2.str.jaccard_index(str1, 5)
assert_eq(expected, actual)
with pytest.raises(ValueError):
str1.str.jaccard_index(str2, 1)
with pytest.raises(ValueError):
str3 = cudf.Series(["not enough rows"])
str1.str.jaccard_index(str3, 5)
def _make_list_of_strings_of_random_length(
num_strings, min_length, max_length
):
return [
"".join(
random.choice(string.ascii_lowercase)
for _ in range(random.randint(min_length, max_length))
)
for _ in range(num_strings)
]
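# Cross-check jaccard_index on randomly generated strings against a reference
# computed directly from character_ngrams with a groupby-based
# intersection/union count.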
def test_jaccard_index_random_strings():
# Seed the rng before random string generation.
random.seed(42)
num_strings = 100
jaccard_width = 5
common_strings = _make_list_of_strings_of_random_length(
num_strings, jaccard_width, 50
)
uncommon_strings1 = _make_list_of_strings_of_random_length(
num_strings, jaccard_width, 10
)
uncommon_strings2 = _make_list_of_strings_of_random_length(
num_strings, jaccard_width, 20
)
str1 = cudf.Series(uncommon_strings1).str.cat(cudf.Series(common_strings))
str2 = cudf.Series(uncommon_strings2).str.cat(cudf.Series(common_strings))
# adopted from https://github.com/rapidsai/rapids-deduplication/issues/36
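    # Reference computation: take the unique character ngrams of each row,
    # count the per-row intersection via a left merge and the union via an
    # outer merge, then divide to obtain the Jaccard index.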
da = str1.str.character_ngrams(jaccard_width, True)
db = str2.str.character_ngrams(jaccard_width, True)
da = da.list.unique()
db = db.list.unique()
da = da.explode()
db = db.explode()
da = da.to_frame()
db = db.to_frame()
da = da.reset_index()
db = db.reset_index()
da = da.rename(columns={0: "token"})
db = db.rename(columns={0: "token"})
db["match"] = 1
inter = da.merge(db, on=["index", "token"], how="left")
inter = inter.groupby("index")["match"].sum()
union = da.merge(db, on=["index", "token"], how="outer")
union = union.groupby("index").size()
res = inter / union
res.fillna(0, inplace=True)
res = res.sort_index()
res = res.values.astype("float32")
expected = cudf.Series(res)
actual = str1.str.jaccard_index(str2, jaccard_width)
assert_eq(expected, actual)
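# BytePairEncoder splits each string into sub-tokens according to the merge
# pairs table and joins them with the requested separator.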
@pytest.mark.parametrize(
"separator, input, results",
[
(" ", "thetestsentence", "the test sent ence"),
("_", "sentenceistest", "sent_ence_is_test"),
("$", "istestsentencehere", "is$test$sent$ence$he$r$e"),
],
)
def test_byte_pair_encoding(separator, input, results):
pairs_table = cudf.Series(
[
"t he",
"h e",
"e n",
"i t",
"i s",
"e s",
"en t",
"c e",
"es t",
"en ce",
"t h",
"h i",
"th is",
"t est",
"s i",
"s ent",
]
)
encoder = BytePairEncoder(pairs_table)
strings = cudf.Series([input, None, "", input])
expected = cudf.Series([results, None, "", results])
actual = encoder(strings, separator)
assert type(expected) == type(actual)
assert_eq(expected, actual)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/text/test_subword_tokenizer.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import os
import cupy
import numpy as np
import pytest
import cudf
from cudf.core.subword_tokenizer import SubwordTokenizer
from cudf.testing._utils import assert_eq
@pytest.fixture(scope="module")
def datadir(datadir):
return os.path.join(datadir, "subword_tokenizer_data")
def assert_equal_tokenization_outputs(hf_output, cudf_output):
assert (
np.sum(hf_output["input_ids"] != cudf_output["input_ids"].get()) == 0
)
assert (
np.sum(
hf_output["attention_mask"] != cudf_output["attention_mask"].get()
)
== 0
)
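# The cudf SubwordTokenizer output (input_ids and attention_mask) should match
# the HuggingFace BertTokenizer on the same sentences and settings.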
@pytest.mark.parametrize("seq_len", [32, 64])
@pytest.mark.parametrize("stride", [0, 15, 30])
@pytest.mark.parametrize("add_special_tokens", [True, False])
@pytest.mark.parametrize("do_lower_case", [True, False])
def test_subword_tokenize(
seq_len, stride, add_special_tokens, do_lower_case, datadir
):
with open(
os.path.join(datadir, "test_sentences.txt"), encoding="utf-8"
) as file:
input_sentence_ls = [line.strip() for line in file]
vocab_dir = os.path.join(datadir, "bert_base_cased_sampled")
transformers = pytest.importorskip("transformers")
hf_tokenizer = transformers.BertTokenizer.from_pretrained(
vocab_dir, do_lower_case=do_lower_case
)
hf_output = hf_tokenizer(
input_sentence_ls,
max_length=seq_len,
stride=stride,
padding="max_length",
return_tensors="np",
truncation=True,
add_special_tokens=add_special_tokens,
)
vocab_hash = os.path.join(vocab_dir, "vocab-hash.txt")
str_series = cudf.Series(input_sentence_ls)
cudf_tokenizer = SubwordTokenizer(vocab_hash, do_lower_case=do_lower_case)
cudf_output = cudf_tokenizer(
str_series,
max_length=seq_len,
max_num_rows=len(str_series),
stride=stride,
padding="max_length",
return_tensors="cp",
truncation=True,
add_special_tokens=add_special_tokens,
)
assert_equal_tokenization_outputs(hf_output, cudf_output)
def test_subword_tokenize_with_truncation(datadir):
vocab_dir = os.path.join(datadir, "bert_base_cased_sampled")
vocab_hash = os.path.join(vocab_dir, "vocab-hash.txt")
str_series = cudf.Series(["Test error"])
cudf_tokenizer = SubwordTokenizer(vocab_hash)
error_msg = (
"Adding special tokens is not supported with truncation = False. "
"Custom Cupy kernel can potentially "
"be used to add it. For reference "
"see: _bert_add_special_tokens"
)
with pytest.raises(NotImplementedError, match=error_msg):
cudf_tokenizer(
str_series,
max_length=64,
max_num_rows=len(str_series),
truncation=False,
add_special_tokens=True,
)
def test_text_subword_tokenize(tmpdir):
sr = cudf.Series(
[
"This is a test",
"A test this is",
"Is test a this",
"Test test",
"this This",
]
)
hash_file = tmpdir.mkdir("nvtext").join("tmp_hashed_vocab.txt")
content = "1\n0\n23\n"
coefficients = [65559] * 23
for c in coefficients:
content = content + str(c) + " 0\n"
# based on values from the bert_hash_table.txt file for the
    # test words used here: 'this' 'is' 'a' 'test'
table = [0] * 23
table[0] = 3015668
table[1] = 6205475701751155871
table[5] = 6358029
table[16] = 451412625363
table[20] = 6206321707968235495
content = content + "23\n"
for v in table:
content = content + str(v) + "\n"
content = content + "100\n101\n102\n\n"
hash_file.write(content)
cudf_tokenizer = SubwordTokenizer(hash_file)
token_d = cudf_tokenizer(
sr, 8, 8, add_special_tokens=False, truncation=True
)
tokens, masks, metadata = (
token_d["input_ids"],
token_d["attention_mask"],
token_d["metadata"],
)
expected_tokens = cupy.asarray(
[
2023,
2003,
1037,
3231,
0,
0,
0,
0,
1037,
3231,
2023,
2003,
0,
0,
0,
0,
2003,
3231,
1037,
2023,
0,
0,
0,
0,
3231,
3231,
0,
0,
0,
0,
0,
0,
2023,
2023,
0,
0,
0,
0,
0,
0,
],
dtype=np.uint32,
)
expected_tokens = expected_tokens.reshape(-1, 8)
assert_eq(expected_tokens, tokens)
expected_masks = cupy.asarray(
[
1,
1,
1,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
1,
1,
1,
1,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
],
dtype=np.uint32,
)
expected_masks = expected_masks.reshape(-1, 8)
assert_eq(expected_masks, masks)
expected_metadata = cupy.asarray(
[0, 0, 3, 1, 0, 3, 2, 0, 3, 3, 0, 1, 4, 0, 1], dtype=np.uint32
)
expected_metadata = expected_metadata.reshape(-1, 3)
assert_eq(expected_metadata, metadata)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/input_output/test_parquet.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/input_output/test_avro.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/input_output/test_hdf5.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/input_output/test_csv.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/input_output/test_orc.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/input_output/test_text.py
|
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
from io import StringIO
import pytest
import cudf
from cudf.testing._utils import assert_eq
@pytest.fixture(scope="module")
def datadir(datadir):
return datadir / "text"
def test_read_text(datadir):
chess_file = str(datadir) + "/chess.pgn"
delimiter = "1."
with open(chess_file) as f:
content = f.read().split(delimiter)
# Since Python split removes the delimiter and read_text does
    # not, we need to add it back to the 'content'
expected = cudf.Series(
[
c + delimiter if i < (len(content) - 1) else c
for i, c in enumerate(content)
]
)
actual = cudf.read_text(chess_file, delimiter=delimiter)
assert_eq(expected, actual)
def test_read_text_byte_range(datadir):
chess_file = str(datadir) + "/chess.pgn"
delimiter = "1."
with open(chess_file, "r") as f:
data = f.read()
content = data.split(delimiter)
# Since Python split removes the delimiter and read_text does
    # not, we need to add it back to the 'content'
expected = cudf.Series(
[
c + delimiter if i < (len(content) - 1) else c
for i, c in enumerate(content)
]
)
byte_range_size = (len(data) // 3) + (len(data) % 3 != 0)
actual_0 = cudf.read_text(
chess_file,
delimiter=delimiter,
byte_range=[byte_range_size * 0, byte_range_size],
)
actual_1 = cudf.read_text(
chess_file,
delimiter=delimiter,
byte_range=[byte_range_size * 1, byte_range_size],
)
actual_2 = cudf.read_text(
chess_file,
delimiter=delimiter,
byte_range=[byte_range_size * 2, byte_range_size],
)
actual = cudf.concat([actual_0, actual_1, actual_2], ignore_index=True)
assert_eq(expected, actual)
def test_read_text_byte_range_large(tmpdir):
content = "".join(("\n" if x % 5 == 4 else "x") for x in range(0, 3000))
delimiter = "\n"
temp_file = str(tmpdir) + "/temp.txt"
with open(temp_file, "w") as f:
f.write(content)
expected = cudf.Series(["xxxx\n" for i in range(0, 200)])
actual = cudf.read_text(
temp_file, delimiter=delimiter, byte_range=[1000, 1000]
)
assert_eq(expected, actual)
def test_read_text_in_memory(datadir):
# Since Python split removes the delimiter and read_text does
    # not, we need to add it back to the 'content'
expected = cudf.Series(["x::", "y::", "z"])
actual = cudf.read_text(StringIO("x::y::z"), delimiter="::")
assert_eq(expected, actual)
def test_read_text_in_memory_strip_delimiter(datadir):
    # With strip_delimiters=True, read_text removes the delimiters from the
    # output, matching the behavior of Python's str.split
expected = cudf.Series(["x", "y", "z"])
actual = cudf.read_text(
StringIO("x::y::z"), delimiter="::", strip_delimiters=True
)
assert_eq(expected, actual)
def test_read_text_bgzip(datadir):
chess_file_compressed = str(datadir) + "/chess.pgn.gz"
chess_file = str(datadir) + "/chess.pgn"
delimiter = "1."
with open(chess_file) as f:
content = f.read().split(delimiter)
# Since Python split removes the delimiter and read_text does
    # not, we need to add it back to the 'content'
expected = cudf.Series(
[
c + delimiter if i < (len(content) - 1) else c
for i, c in enumerate(content)
]
)
actual = cudf.read_text(
chess_file_compressed, compression="bgzip", delimiter=delimiter
)
assert_eq(expected, actual)
def test_read_text_bgzip_offsets(datadir):
chess_file_compressed = str(datadir) + "/chess.pgn.gz"
chess_file = str(datadir) + "/chess.pgn"
delimiter = "1."
with open(chess_file) as f:
content = f.read()[29:695].split(delimiter)
# Since Python split removes the delimiter and read_text does
    # not, we need to add it back to the 'content'
expected = cudf.Series(
[
c + delimiter if i < (len(content) - 1) else c
for i, c in enumerate(content)
]
)
actual = cudf.read_text(
chess_file_compressed,
compression="bgzip",
compression_offsets=[58 * 2**16 + 2, 781 * 2**16 + 7],
delimiter=delimiter,
)
assert_eq(expected, actual)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/input_output/test_feather.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/tests
|
rapidsai_public_repos/cudf/python/cudf/cudf/tests/input_output/test_json.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/profiler.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import inspect
import operator
import pickle
import sys
import time
from collections import defaultdict
from typing import Union
from rich.console import Console
from rich.syntax import Syntax
from rich.table import Table
from .fast_slow_proxy import (
_FinalProxy,
_FunctionProxy,
_IntermediateProxy,
_MethodProxy,
)
# This text is used in contexts where the profiler is injected into the
# original code. The profiler is injected at the top of the cell, so the line
# numbers in the profiler results are offset by 2.
_profile_injection_text = """\
from cudf.pandas import Profiler
with Profiler() as profiler:
{original_lines}
# Patch the results to shift the line numbers back to the original before the
# profiler injection.
new_results = {{}}
for (lineno, currfile, line), v in profiler._results.items():
new_results[(lineno - 2, currfile, line)] = v
profiler._results = new_results
profiler.print_per_line_stats()
{function_profile_printer}
"""
_cpu_issue_text = """\
Not all pandas operations ran on the GPU. \
The following functions required CPU fallback:
{cpu_functions_used}
"""
def format_cpu_functions_used(cpu_funcs):
output_str = ""
for each in cpu_funcs:
output_str += f"- {each}\n"
# remove final newline character
output_str = output_str[:-1]
return output_str
def lines_with_profiling(lines, print_function_profile=False):
"""Inject profiling code into the given lines of code."""
cleaned_lines = "\n".join(
[(" " * 4) + line.replace("\t", " " * 4) for line in lines]
)
return _profile_injection_text.format(
original_lines=cleaned_lines,
function_profile_printer="profiler.print_per_function_stats()"
if print_function_profile
else "",
)
class Profiler:
_IGNORE_LIST = ["Profiler()", "settrace(None)"]
def __init__(self):
self._results = {}
# Map func-name to list of calls (was_fast, time)
self._per_func_results = defaultdict(lambda: defaultdict(list))
# Current fast_slow_function_call stack frame recording name
# and start time
self._call_stack = []
self._currkey = None
self._timer = {}
self._currfile = None
self.start_time = None
self.end_time = None
def __enter__(self, *args, **kwargs):
self.start_time = time.perf_counter()
self._oldtrace = sys.gettrace()
# Setting the global trace function with sys.settrace does not affect
# the current call stack, so in addition to this we must also set the
# current frame's f_trace attribute as done below.
sys.settrace(self._tracefunc)
# Following excerpt from:
# https://docs.python.org/3/library/sys.html#sys.settrace
# For more fine-grained usage, it is possible
# to set a trace function by assigning
# frame.f_trace = tracefunc explicitly, rather than
# relying on it being set indirectly via the return
# value from an already installed trace function
        # Hence we need to perform `frame.f_trace = self._tracefunc` here;
        # we use `f_back` because the current frame belongs to this file,
        # not to the code being profiled.
frame = inspect.currentframe().f_back
self._currfile = frame.f_code.co_filename
self._f_back_oldtrace = frame.f_trace
frame.f_trace = self._tracefunc
return self
def __exit__(self, *args, **kwargs):
sys.settrace(self._oldtrace)
inspect.currentframe().f_back.f_trace = self._f_back_oldtrace
self.end_time = time.perf_counter()
@staticmethod
def get_namespaced_function_name(
func_obj: Union[
_FunctionProxy,
_MethodProxy,
type[_FinalProxy],
type[_IntermediateProxy],
]
):
if isinstance(func_obj, _MethodProxy):
# Extract classname from method object
type_name = type(func_obj._fsproxy_wrapped.__self__).__name__
# Explicitly ask for __name__ on _fsproxy_wrapped to avoid
# getting a private attribute and forcing a slow-path copy
func_name = func_obj._fsproxy_wrapped.__name__
return ".".join([type_name, func_name])
elif isinstance(func_obj, _FunctionProxy) or issubclass(
func_obj, (_FinalProxy, _IntermediateProxy)
):
return func_obj.__name__
else:
raise NotImplementedError(
f"Don't know how to get namespaced name for {func_obj}"
)
def _tracefunc(self, frame, event, arg):
if event == "line" and frame.f_code.co_filename == self._currfile:
key = "".join(inspect.stack()[1].code_context)
if not any(
ignore_word in key for ignore_word in Profiler._IGNORE_LIST
):
self._currkey = (frame.f_lineno, self._currfile, key)
self._results.setdefault(self._currkey, {})
self._timer[self._currkey] = time.perf_counter()
elif (
event == "call"
and frame.f_code.co_name == "_fast_slow_function_call"
):
if self._currkey is not None:
self._timer[self._currkey] = time.perf_counter()
# Store per-function information for free functions and methods
frame_locals = inspect.getargvalues(frame).locals
if (
isinstance(
func_obj := frame_locals["args"][0],
(_MethodProxy, _FunctionProxy),
)
or isinstance(func_obj, type)
and issubclass(func_obj, (_FinalProxy, _IntermediateProxy))
):
func_name = self.get_namespaced_function_name(func_obj)
self._call_stack.append((func_name, time.perf_counter()))
elif (
event == "return"
and frame.f_code.co_name == "_fast_slow_function_call"
):
if self._currkey is not None and arg is not None:
if arg[1]: # fast
run_time = time.perf_counter() - self._timer[self._currkey]
self._results[self._currkey][
"gpu_time"
] = run_time + self._results[self._currkey].get(
"gpu_time", 0
)
else:
run_time = time.perf_counter() - self._timer[self._currkey]
self._results[self._currkey][
"cpu_time"
] = run_time + self._results[self._currkey].get(
"cpu_time", 0
)
frame_locals = inspect.getargvalues(frame).locals
if (
isinstance(
func_obj := frame_locals["args"][0],
(_MethodProxy, _FunctionProxy),
)
or isinstance(func_obj, type)
and issubclass(func_obj, (_FinalProxy, _IntermediateProxy))
):
func_name, start = self._call_stack.pop()
if arg is not None:
key = "gpu" if arg[1] else "cpu"
self._per_func_results[func_name][key].append(
time.perf_counter() - start
)
return self._tracefunc
@property
def per_line_stats(self):
list_data = []
for key, val in self._results.items():
cpu_time = val.get("cpu_time", 0)
gpu_time = val.get("gpu_time", 0)
line_no, _, line = key
list_data.append([line_no, line, gpu_time, cpu_time])
return sorted(list_data, key=operator.itemgetter(0))
@property
def per_function_stats(self):
return self._per_func_results
def print_per_line_stats(self):
table = Table()
table.add_column("Line no.")
table.add_column("Line")
table.add_column("GPU TIME(s)")
table.add_column("CPU TIME(s)")
for line_no, line, gpu_time, cpu_time in self.per_line_stats:
table.add_row(
str(line_no),
Syntax(str(line), "python"),
"" if gpu_time == 0 else "{:.9f}".format(gpu_time),
"" if cpu_time == 0 else "{:.9f}".format(cpu_time),
)
time_elapsed = self.end_time - self.start_time
table.title = f"""\n\
Total time elapsed: {time_elapsed:.3f} seconds
Stats
"""
console = Console()
console.print(table)
def print_per_function_stats(self):
cpu_funcs = []
n_gpu_func_calls = 0
n_cpu_func_calls = 0
total_gpu_time = 0
total_cpu_time = 0
table = Table()
for col in (
"Function",
"GPU ncalls",
"GPU cumtime",
"GPU percall",
"CPU ncalls",
"CPU cumtime",
"CPU percall",
):
table.add_column(col)
for func_name, func_data in self.per_function_stats.items():
gpu_times = func_data["gpu"]
cpu_times = func_data["cpu"]
table.add_row(
func_name,
f"{len(gpu_times)}",
f"{sum(gpu_times):.3f}",
f"{sum(gpu_times) / max(len(gpu_times), 1):.3f}",
f"{len(cpu_times)}",
f"{sum(cpu_times):.3f}",
f"{sum(cpu_times) / max(len(cpu_times), 1):.3f}",
)
total_gpu_time += sum(gpu_times)
total_cpu_time += sum(cpu_times)
n_gpu_func_calls += len(gpu_times)
n_cpu_func_calls += len(cpu_times)
if cpu_times and func_name not in cpu_funcs:
cpu_funcs.append(func_name)
time_elapsed = self.end_time - self.start_time
table.title = f"""\n\
Total time elapsed: {time_elapsed:.3f} seconds
{n_gpu_func_calls} GPU function calls in {total_gpu_time:.3f} seconds
{n_cpu_func_calls} CPU function calls in {total_cpu_time:.3f} seconds
Stats
"""
console = Console()
console.print(table)
if cpu_funcs:
call_to_action = (
"To request GPU support for any of these functions, "
"please file a Github issue here: "
"[link=https://github.com/rapidsai/cudf/issues/new?assignees"
"=&labels=%3F+-+Needs+Triage%2C+feature+request&projects="
"&template=pandas_function_request.md&title=%5BFEA%5D]"
"https://github.com/rapidsai/cudf/issues/new/choose"
"[/link]."
)
console.print(
_cpu_issue_text.format(
cpu_functions_used=format_cpu_functions_used(cpu_funcs)
)
)
console.print(call_to_action)
def dump_stats(self, file_name):
with open(file_name, "wb") as f:
pickle.dump(self, f)
def load_stats(file_name):
with open(file_name, "rb") as f:
return pickle.load(f)
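# Illustrative usage sketch (comments only, not executed): the Profiler is
# intended to wrap pandas code running under cudf.pandas; the DataFrame
# below is hypothetical.
#
#     from cudf.pandas import Profiler
#     import pandas as pd
#
#     with Profiler() as profiler:
#         df = pd.DataFrame({"a": [1, 2, 3]})
#         df = df.sort_values("a")
#     profiler.print_per_line_stats()
#     profiler.print_per_function_stats()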
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/magics.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
try:
from IPython.core.magic import Magics, cell_magic, magics_class
from .profiler import Profiler, lines_with_profiling
@magics_class
class CudfPandasMagics(Magics):
@cell_magic("cudf.pandas.profile")
def profile(self, _, cell):
with Profiler() as profiler:
get_ipython().run_cell(cell) # noqa: F821
profiler.print_per_function_stats()
@cell_magic("cudf.pandas.line_profile")
def line_profile(self, _, cell):
new_cell = lines_with_profiling(cell.split("\n"))
get_ipython().run_cell(new_cell) # noqa: F821
def load_ipython_extension(ip):
from . import install
install()
ip.register_magics(CudfPandasMagics)
except ImportError:
def load_ipython_extension(ip):
pass
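# Illustrative usage sketch (comments only): in an IPython/Jupyter session,
# assuming the extension has been loaded (e.g. via `%load_ext cudf.pandas`),
# the magics registered above can be used as:
#
#     %%cudf.pandas.profile
#     import pandas as pd
#     pd.Series([1, 2, 3]).sum()
#
# and `%%cudf.pandas.line_profile` for the per-line variant.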
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/annotation.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
try:
import nvtx
except ImportError:
class nvtx: # type: ignore
"""Noop-stub with the same API as nvtx."""
push_range = lambda *args, **kwargs: None # noqa: E731
pop_range = lambda *args, **kwargs: None # noqa: E731
class annotate:
"""No-op annotation/context-manager"""
def __init__(
self,
message: str | None = None,
color: str | None = None,
domain: str | None = None,
category: str | int | None = None,
):
pass
def __enter__(self):
return self
def __exit__(self, *exc):
return False
__call__ = lambda self, fn: fn # noqa: E731
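# Illustrative sketch (comments only): the rest of this package annotates
# regions with nvtx whether or not the real nvtx package is installed, e.g.
#
#     @nvtx.annotate("COPY_SLOW_TO_FAST", domain="cudf_pandas")
#     def convert(obj):  # hypothetical function
#         ...
#
# With nvtx missing, the stub above turns the decorator and the
# push_range/pop_range calls into no-ops.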
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/module_accelerator.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import contextlib
import functools
import importlib
import importlib.abc
import importlib.machinery
import os
import pathlib
import sys
import threading
import warnings
from abc import abstractmethod
from importlib._bootstrap import _ImportLockContext as ImportLock
from types import ModuleType
from typing import Any, ContextManager, Dict, List, NamedTuple
from typing_extensions import Self
from .fast_slow_proxy import (
_FunctionProxy,
_is_function_or_method,
_Unusable,
get_final_type_map,
get_intermediate_type_map,
get_registered_functions,
)
def rename_root_module(module: str, root: str, new_root: str) -> str:
"""
Rename a module to a new root.
Parameters
----------
module
Module to rename
root
Original root
new_root
New root
Returns
-------
New module name (if it matches root) otherwise original name.
"""
if module.startswith(root):
return new_root + module[len(root) :]
else:
return module
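# Illustrative sketch of the renaming behaviour (comments only):
#
#     rename_root_module("pandas.core.frame", "pandas", "cudf.pandas")
#     # -> "cudf.pandas.core.frame"
#     rename_root_module("numpy.linalg", "pandas", "cudf.pandas")
#     # -> "numpy.linalg" (root does not match, name is unchanged)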
class DeducedMode(NamedTuple):
use_fast_lib: bool
slow_lib: str
fast_lib: str
def deduce_cudf_pandas_mode(slow_lib: str, fast_lib: str) -> DeducedMode:
"""
Determine if cudf.pandas should use the requested fast library.
Parameters
----------
slow_lib
Name of the slow library
fast_lib
Name of the fast library
Returns
-------
Whether the fast library is being used, and the resulting names of
the "slow" and "fast" libraries.
"""
if "CUDF_PANDAS_FALLBACK_MODE" not in os.environ:
try:
importlib.import_module(fast_lib)
return DeducedMode(
use_fast_lib=True, slow_lib=slow_lib, fast_lib=fast_lib
)
except Exception as e:
warnings.warn(
f"Exception encountered importing {fast_lib}: {e}."
f"Falling back to only using {slow_lib}."
)
return DeducedMode(
use_fast_lib=False, slow_lib=slow_lib, fast_lib=slow_lib
)
class ModuleAcceleratorBase(
importlib.abc.MetaPathFinder, importlib.abc.Loader
):
_instance: ModuleAcceleratorBase | None = None
mod_name: str
fast_lib: str
slow_lib: str
# When walking the module tree and wrapping module attributes,
# we often will come across the same object more than once. We
# don't want to create separate wrappers for each
# instance, so we keep a registry of all module attributes
# that we can look up to see if we have already wrapped an
# attribute before
_wrapped_objs: dict[Any, Any]
def __new__(
cls,
mod_name: str,
fast_lib: str,
slow_lib: str,
):
"""Build a custom module finder that will provide wrapped modules
on demand.
Parameters
----------
mod_name
Import name to deliver modules under.
fast_lib
Name of package that provides "fast" implementation
slow_lib
Name of package that provides "slow" fallback implementation
"""
if ModuleAcceleratorBase._instance is not None:
raise RuntimeError(
"Only one instance of ModuleAcceleratorBase allowed"
)
self = object.__new__(cls)
self.mod_name = mod_name
self.fast_lib = fast_lib
self.slow_lib = slow_lib
# When walking the module tree and wrapping module attributes,
# we often will come across the same object more than once. We
# don't want to create separate wrappers for each
# instance, so we keep a registry of all module attributes
# that we can look up to see if we have already wrapped an
# attribute before
self._wrapped_objs = {}
self._wrapped_objs.update(get_final_type_map())
self._wrapped_objs.update(get_intermediate_type_map())
self._wrapped_objs.update(get_registered_functions())
ModuleAcceleratorBase._instance = self
return self
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}"
f"(fast={self.fast_lib}, slow={self.slow_lib})"
)
def find_spec(
self, fullname: str, path, target=None
) -> importlib.machinery.ModuleSpec | None:
"""Provide ourselves as a module loader.
Parameters
----------
fullname
Name of module to be imported, if it starts with the name
that we are using to wrap, we will deliver ourselves as a
loader, otherwise defer to the standard Python loaders.
Returns
-------
A ModuleSpec with ourself as loader if we're interposing,
otherwise None to pass off to the next loader.
"""
if fullname == self.mod_name or fullname.startswith(
f"{self.mod_name}."
):
return importlib.machinery.ModuleSpec(
name=fullname,
loader=self,
# Note, this influences the repr of the module, so we may want
# to change it if we ever want to control that.
origin=None,
loader_state=None,
is_package=True,
)
return None
def create_module(self, spec) -> ModuleType | None:
return None
def exec_module(self, mod: ModuleType):
# importlib calls this function with the global import lock held.
self._populate_module(mod)
@abstractmethod
def disabled(self) -> ContextManager:
pass
def _postprocess_module(
self,
mod: ModuleType,
slow_mod: ModuleType,
fast_mod: ModuleType | None,
) -> ModuleType:
"""Ensure that the wrapped module satisfies required invariants.
Parameters
----------
mod
Wrapped module to postprocess
slow_mod
Slow version that we are mimicking
fast_mod
Fast module that provides accelerated implementations (may
            be None)
Returns
-------
Checked and validated module
Notes
-----
The implementation of fast-slow proxies imposes certain
requirements on the wrapped modules that it delivers. This
function encodes those requirements and raises if the module
does not satisfy them.
This post-processing routine should be kept up to date with any
requirements encoded by fast_slow_proxy.py
"""
mod.__dict__["_fsproxy_slow"] = slow_mod
if fast_mod is not None:
mod.__dict__["_fsproxy_fast"] = fast_mod
return mod
@abstractmethod
def _populate_module(self, mod: ModuleType) -> ModuleType:
"""Populate given module with appropriate attributes.
This traverses the attributes of the slow module corresponding
to mod and mirrors those in the provided module in a wrapped
mode that attempts to execute them using the fast module first.
Parameters
----------
mod
Module to populate
Returns
-------
ModuleType
Populated module
Notes
-----
In addition to the attributes of the slow module,
the returned module must have the following attributes:
- '_fsproxy_slow': the corresponding slow module
- '_fsproxy_fast': the corresponding fast module
This is necessary for correct rewriting of UDFs when calling
to the respective fast/slow libraries.
The necessary invariants are checked and applied in
:meth:`_postprocess_module`.
"""
pass
def _wrap_attribute(
self,
slow_attr: Any,
fast_attr: Any | _Unusable,
name: str,
) -> Any:
"""
Return the wrapped version of an attribute.
Parameters
----------
slow_attr : Any
The attribute from the slow module
        fast_attr : Any or _Unusable
The same attribute from the fast module, if it exists
name
Name of attribute
Returns
-------
Wrapped attribute
"""
wrapped_attr: Any
# TODO: what else should we make sure not to get from the fast
# library?
if name in {"__all__", "__dir__", "__file__", "__doc__"}:
wrapped_attr = slow_attr
elif self.fast_lib == self.slow_lib:
# no need to create a fast-slow wrapper
wrapped_attr = slow_attr
if any(
[
slow_attr in get_registered_functions(),
slow_attr in get_final_type_map(),
slow_attr in get_intermediate_type_map(),
]
):
# attribute already registered in self._wrapped_objs
return self._wrapped_objs[slow_attr]
if isinstance(slow_attr, ModuleType) and slow_attr.__name__.startswith(
self.slow_lib
):
# attribute is a submodule of the slow library,
# replace the string "{slow_lib}" in the submodule's
# name with "{self.mod_name}"
# now, attempt to import the wrapped module, which will
# recursively wrap all of its attributes:
return importlib.import_module(
rename_root_module(
slow_attr.__name__, self.slow_lib, self.mod_name
)
)
if slow_attr in self._wrapped_objs:
if type(fast_attr) is _Unusable:
# we don't want to replace a wrapped object that
# has a usable fast object with a wrapped object
                # with an unusable fast object.
return self._wrapped_objs[slow_attr]
if _is_function_or_method(slow_attr):
wrapped_attr = _FunctionProxy(fast_attr, slow_attr)
else:
wrapped_attr = slow_attr
return wrapped_attr
@classmethod
@abstractmethod
def install(
cls, destination_module: str, fast_lib: str, slow_lib: str
) -> Self | None:
"""
Install the loader in sys.meta_path.
Parameters
----------
destination_module
Name under which the importer will kick in
fast_lib
Name of fast module
slow_lib
Name of slow module we are trying to mimic
Returns
-------
Instance of the class (or None if the loader was not installed)
Notes
-----
This function is idempotent. If called with the same arguments
a second time, it does not create a new loader, but instead
returns the existing loader from ``sys.meta_path``.
"""
pass
class ModuleAccelerator(ModuleAcceleratorBase):
"""
A finder and loader that produces "accelerated" modules.
When someone attempts to import the specified slow library with
this finder enabled, we intercept the import and deliver an
equivalent, accelerated, version of the module. This provides
attributes and modules that check if they are being used from
"within" the slow (or fast) library themselves. If this is the
case, the implementation is forwarded to the actual slow library
implementation, otherwise a proxy implementation is used (which
attempts to call the fast version first).
"""
_denylist: List[str]
_use_fast_lib: bool
_use_fast_lib_lock: threading.RLock
_module_cache_prefix: str = "_slow_lib_"
# TODO: Add possibility for either an explicit allow-list of
# libraries where the slow_lib should be wrapped, or, more likely
# a block-list that adds to the set of libraries where no proxying occurs.
def __new__(
cls,
fast_lib,
slow_lib,
):
self = super().__new__(
cls,
slow_lib,
fast_lib,
slow_lib,
)
# Import the real versions of the modules so that we can
# rewrite the sys.modules cache.
slow_module = importlib.import_module(slow_lib)
fast_module = importlib.import_module(fast_lib)
# Note, this is not thread safe, but install() below grabs the
# lock for the whole initialisation and modification of
# sys.meta_path.
for mod in sys.modules.copy():
if mod.startswith(self.slow_lib):
sys.modules[self._module_cache_prefix + mod] = sys.modules[mod]
del sys.modules[mod]
self._denylist = [*slow_module.__path__, *fast_module.__path__]
# Lock to manage temporarily disabling delivering wrapped attributes
self._use_fast_lib_lock = threading.RLock()
self._use_fast_lib = True
return self
def _populate_module(self, mod: ModuleType):
mod_name = mod.__name__
# Here we attempt to import "_fsproxy_slow_lib.x.y.z", but
# "_fsproxy_slow_lib" does not exist anywhere as a real file, so
# how does this work?
# The importer attempts to import ".z" by first importing
# "_fsproxy_slow_lib.x.y", this recurses until we find
# "_fsproxy_slow_lib.x" (say), which does exist because we set that up
# in __init__. Now the importer looks at the __path__
# attribute of "x" and uses that to find the relative location
# to look for "y". This __path__ points to the real location
# of "slow_lib.x". So, as long as we rewire the _already imported_
# slow_lib modules in sys.modules to _fsproxy_slow_lib, when we
# get here this will find the right thing.
# The above exposition is for lazily imported submodules (e.g.
# avoiding circular imports by putting an import at function
# level). For everything that is eagerly imported when we do
# "import slow_lib" this import line is trivial because we
# immediately pull the correct result out of sys.modules.
slow_mod = importlib.import_module(
rename_root_module(
mod_name,
self.slow_lib,
self._module_cache_prefix + self.slow_lib,
)
)
try:
fast_mod = importlib.import_module(
rename_root_module(mod_name, self.slow_lib, self.fast_lib)
)
except Exception:
fast_mod = None
# The version that will be used if called within a denylist
# package
real_attributes = {}
# The version that will be used outside denylist packages
for key in slow_mod.__dir__():
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
slow_attr = getattr(slow_mod, key)
fast_attr = getattr(fast_mod, key, _Unusable())
real_attributes[key] = slow_attr
try:
wrapped_attr = self._wrap_attribute(slow_attr, fast_attr, key)
self._wrapped_objs[slow_attr] = wrapped_attr
except TypeError:
# slow_attr is not hashable
pass
# Our module has (basically) no static attributes and instead
# always delivers them dynamically where the behaviour is
# dependent on the calling module.
setattr(
mod,
"__getattr__",
functools.partial(
self.getattr_real_or_wrapped,
real=real_attributes,
wrapped_objs=self._wrapped_objs,
loader=self,
),
)
# ...but, we want to pretend like we expose the same attributes
# as the equivalent slow module
setattr(mod, "__dir__", slow_mod.__dir__)
# We set __path__ to the real path so that importers like
# jinja2.PackageLoader("slow_mod") work correctly.
if getattr(slow_mod, "__path__", False):
assert mod.__spec__
mod.__path__ = slow_mod.__path__
mod.__spec__.submodule_search_locations = [*slow_mod.__path__]
return self._postprocess_module(mod, slow_mod, fast_mod)
@contextlib.contextmanager
def disabled(self):
"""Return a context manager for disabling the module accelerator.
Within the block, any wrapped objects will instead deliver
attributes from their real counterparts (as if the current
nested block were in the denylist).
Returns
-------
Context manager for disabling things
"""
try:
self._use_fast_lib_lock.acquire()
# The same thread might enter this context manager
# multiple times, so we need to remember the previous
# value
saved = self._use_fast_lib
self._use_fast_lib = False
yield
finally:
self._use_fast_lib = saved
self._use_fast_lib_lock.release()
@staticmethod
def getattr_real_or_wrapped(
name: str,
*,
real: Dict[str, Any],
wrapped_objs,
loader: ModuleAccelerator,
) -> Any:
"""
Obtain an attribute from a module from either the real or
wrapped namespace.
Parameters
----------
name
Attribute to return
real
Unwrapped "original" attributes
        wrapped_objs
Wrapped attributes
loader
Loader object that manages denylist and other skipping
Returns
-------
The requested attribute (either real or wrapped)
"""
with loader._use_fast_lib_lock:
# Have to hold the lock to read this variable since
# another thread might modify it.
# Modification has to happen with the lock held for the
# duration, so if someone else has modified things, then
# we block trying to acquire the lock (hence it is safe to
# release the lock after reading this value)
use_real = not loader._use_fast_lib
if not use_real:
# Only need to check the denylist if we're not turned off.
frame = sys._getframe()
# We cannot possibly be at the top level.
assert frame.f_back
calling_module = pathlib.PurePath(frame.f_back.f_code.co_filename)
use_real = any(
calling_module.is_relative_to(path)
for path in loader._denylist
)
try:
if use_real:
return real[name]
else:
return wrapped_objs[real[name]]
except KeyError:
raise AttributeError(f"No attribute '{name}'")
except TypeError:
# real[name] is an unhashable type
return real[name]
@classmethod
def install(
cls,
destination_module: str,
fast_lib: str,
slow_lib: str,
) -> Self | None:
# This grabs the global _import_ lock to avoid concurrent
# threads modifying sys.modules.
# We also make sure that we finish installing ourselves in
# sys.meta_path before releasing the lock so that there isn't
# a race between our modification of sys.modules and someone
# else importing the slow_lib before we have added ourselves
# to the meta_path
with ImportLock():
if destination_module != slow_lib:
raise RuntimeError(
f"Destination module '{destination_module}' must match"
f"'{slow_lib}' for this to work."
)
mode = deduce_cudf_pandas_mode(slow_lib, fast_lib)
if mode.use_fast_lib:
importlib.import_module(
f".._wrappers.{mode.slow_lib}", __name__
)
try:
(self,) = (
p
for p in sys.meta_path
if isinstance(p, cls)
and p.slow_lib == mode.slow_lib
and p.fast_lib == mode.fast_lib
)
except ValueError:
self = cls(mode.fast_lib, mode.slow_lib)
sys.meta_path.insert(0, self)
return self
def disable_module_accelerator() -> contextlib.ExitStack:
"""
Temporarily disable any module acceleration.
"""
with contextlib.ExitStack() as stack:
for finder in sys.meta_path:
if isinstance(finder, ModuleAcceleratorBase):
stack.enter_context(finder.disabled())
return stack.pop_all()
assert False # pacify type checker
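# Illustrative usage sketch (comments only): temporarily deliver real (slow)
# library objects, e.g. around a third-party call that must see genuine
# pandas objects; `some_pandas_only_routine` is a hypothetical placeholder.
#
#     with disable_module_accelerator():
#         result = some_pandas_only_routine(df)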
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/fast_slow_proxy.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import functools
import inspect
import operator
import pickle
import types
from collections.abc import Iterator
from enum import IntEnum
from typing import (
Any,
Callable,
Dict,
Literal,
Mapping,
Optional,
Set,
Tuple,
Type,
)
from .annotation import nvtx
_CUDF_PANDAS_NVTX_COLORS = {
"COPY_SLOW_TO_FAST": 0xCA0020,
"COPY_FAST_TO_SLOW": 0xF4A582,
"EXECUTE_FAST": 0x92C5DE,
"EXECUTE_SLOW": 0x0571B0,
}
_WRAPPER_ASSIGNMENTS = tuple(
attr
for attr in functools.WRAPPER_ASSIGNMENTS
# Skip __doc__ because we assign it on class creation using exec_body
# callable that updates the namespace of the class.
# Skip __annotations__ because there are differences between Python
# versions on how it is initialized for a class that doesn't explicitly
# define it and we don't want to force eager evaluation of anything that
# would normally be lazy (mostly for consistency, shouldn't cause any
# significant issues).
if attr not in ("__annotations__", "__doc__")
)
def callers_module_name():
# Call f_back twice since this function adds an extra frame
return inspect.currentframe().f_back.f_back.f_globals["__name__"]
class _State(IntEnum):
"""Simple enum to track the type of wrapped object of a final proxy"""
SLOW = 0
FAST = 1
class _Unusable:
"""
A totally unusable type. When a "fast" object is not available,
it's useful to set it to _Unusable() so that any operations
on it fail, and ensure fallback to the corresponding
"slow" object.
"""
def __call__(self, *args: Any, **kwds: Any) -> Any:
raise NotImplementedError(
"Fast implementation not available. "
"Falling back to the slow implementation"
)
def __getattribute__(self, name: str) -> Any:
if name in {"__class__"}: # needed for type introspection
return super().__getattribute__(name)
raise TypeError("Unusable type. Falling back to the slow object")
class _PickleConstructor:
"""A pickleable object to support construction in __reduce__.
This object is used to avoid having unpickling call __init__ on the
objects, instead only invoking __new__. __init__ may have required
arguments or otherwise perform invalid initialization that we could skip
altogether since we're going to overwrite the wrapped object.
"""
def __init__(self, type_):
self._type = type_
def __call__(self):
return object.__new__(self._type)
_DELETE = object()
def make_final_proxy_type(
name: str,
fast_type: type,
slow_type: type,
*,
fast_to_slow: Callable,
slow_to_fast: Callable,
module: Optional[str] = None,
additional_attributes: Mapping[str, Any] | None = None,
postprocess: Callable[[_FinalProxy, Any, Any], Any] | None = None,
bases: Tuple = (),
) -> Type[_FinalProxy]:
"""
Defines a fast-slow proxy type for a pair of "final" fast and slow
types. Final types are types for which known operations exist for
converting an object of "fast" type to "slow" and vice-versa.
Parameters
----------
name: str
The name of the class returned
fast_type: type
slow_type: type
fast_to_slow: callable
Function that accepts a single argument of type `fast_type`
and returns an object of type `slow_type`
slow_to_fast: callable
Function that accepts a single argument of type `slow_type`
and returns an object of type `fast_type`
additional_attributes
Mapping of additional attributes to add to the class
(optional), these will override any defaulted attributes (e.g.
        ``__init__``). If you want to remove a defaulted attribute
completely, pass the special sentinel ``_DELETE`` as a value.
postprocess
Optional function called to allow the proxy to postprocess
itself when being wrapped up, called with the proxy object,
the unwrapped result object, and the function that was used to
construct said unwrapped object. See also `_maybe_wrap_result`.
bases
Optional tuple of base classes to insert into the mro.
Notes
-----
As a side-effect, this function adds `fast_type` and `slow_type`
to a global mapping of final types to their corresponding proxy
types, accessible via `get_final_type_map()`.
"""
def __init__(self, *args, **kwargs):
_fast_slow_function_call(
lambda cls, args, kwargs: setattr(
self, "_fsproxy_wrapped", cls(*args, **kwargs)
),
type(self),
args,
kwargs,
)
@nvtx.annotate(
"COPY_SLOW_TO_FAST",
color=_CUDF_PANDAS_NVTX_COLORS["COPY_SLOW_TO_FAST"],
domain="cudf_pandas",
)
def _fsproxy_slow_to_fast(self):
# if we are wrapping a slow object,
# convert it to a fast one
if self._fsproxy_state is _State.SLOW:
return slow_to_fast(self._fsproxy_wrapped)
return self._fsproxy_wrapped
@nvtx.annotate(
"COPY_FAST_TO_SLOW",
color=_CUDF_PANDAS_NVTX_COLORS["COPY_FAST_TO_SLOW"],
domain="cudf_pandas",
)
def _fsproxy_fast_to_slow(self):
# if we are wrapping a fast object,
# convert it to a slow one
if self._fsproxy_state is _State.FAST:
return fast_to_slow(self._fsproxy_wrapped)
return self._fsproxy_wrapped
@property # type: ignore
def _fsproxy_state(self) -> _State:
return (
_State.FAST
if isinstance(self._fsproxy_wrapped, self._fsproxy_fast_type)
else _State.SLOW
)
def __reduce__(self):
# Need a local import to avoid circular import issues
from .module_accelerator import disable_module_accelerator
with disable_module_accelerator():
pickled_wrapped_obj = pickle.dumps(self._fsproxy_wrapped)
return (_PickleConstructor(type(self)), (), pickled_wrapped_obj)
def __setstate__(self, state):
# Need a local import to avoid circular import issues
from .module_accelerator import disable_module_accelerator
with disable_module_accelerator():
unpickled_wrapped_obj = pickle.loads(state)
self._fsproxy_wrapped = unpickled_wrapped_obj
slow_dir = dir(slow_type)
cls_dict = {
"__init__": __init__,
"__doc__": inspect.getdoc(slow_type),
"_fsproxy_slow_dir": slow_dir,
"_fsproxy_fast_type": fast_type,
"_fsproxy_slow_type": slow_type,
"_fsproxy_slow_to_fast": _fsproxy_slow_to_fast,
"_fsproxy_fast_to_slow": _fsproxy_fast_to_slow,
"_fsproxy_state": _fsproxy_state,
"__reduce__": __reduce__,
"__setstate__": __setstate__,
}
if additional_attributes is None:
additional_attributes = {}
for method in _SPECIAL_METHODS:
if getattr(slow_type, method, False):
cls_dict[method] = _FastSlowAttribute(method)
for k, v in additional_attributes.items():
if v is _DELETE and k in cls_dict:
del cls_dict[k]
elif v is not _DELETE:
cls_dict[k] = v
cls = types.new_class(
name,
(*bases, _FinalProxy),
{"metaclass": _FastSlowProxyMeta},
lambda ns: ns.update(cls_dict),
)
functools.update_wrapper(
cls,
slow_type,
assigned=_WRAPPER_ASSIGNMENTS,
updated=(),
)
cls.__module__ = module if module is not None else callers_module_name()
final_type_map = get_final_type_map()
if fast_type is not _Unusable:
final_type_map[fast_type] = cls
final_type_map[slow_type] = cls
return cls
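# Illustrative sketch (comments only) of defining a final proxy type. The
# cudf/pandas pairing mirrors how cudf.pandas wraps DataFrame, but the exact
# conversion callables here are assumptions for illustration:
#
#     import cudf
#     import pandas as pd
#
#     DataFrame = make_final_proxy_type(
#         "DataFrame",
#         cudf.DataFrame,
#         pd.DataFrame,
#         fast_to_slow=lambda fast: fast.to_pandas(),
#         slow_to_fast=cudf.from_pandas,
#     )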
def make_intermediate_proxy_type(
name: str,
fast_type: type,
slow_type: type,
*,
module: Optional[str] = None,
) -> Type[_IntermediateProxy]:
"""
Defines a proxy type for a pair of "intermediate" fast and slow
types. Intermediate types are the types of the results of
operations invoked on final types.
As a side-effect, this function adds `fast_type` and `slow_type`
to a global mapping of intermediate types to their corresponding
proxy types, accessible via `get_intermediate_type_map()`.
Parameters
----------
name: str
The name of the class returned
fast_type: type
slow_type: type
"""
def __init__(self, *args, **kwargs):
# disallow __init__. An intermediate proxy type can only be
# instantiated from (possibly chained) operations on a final
# proxy type.
raise TypeError(
f"Cannot directly instantiate object of type {type(self)}"
)
@property # type: ignore
def _fsproxy_state(self):
return (
_State.FAST
if isinstance(self._fsproxy_wrapped, self._fsproxy_fast_type)
else _State.SLOW
)
@nvtx.annotate(
"COPY_SLOW_TO_FAST",
color=_CUDF_PANDAS_NVTX_COLORS["COPY_SLOW_TO_FAST"],
domain="cudf_pandas",
)
def _fsproxy_slow_to_fast(self):
if self._fsproxy_state is _State.SLOW:
return super(type(self), self)._fsproxy_slow_to_fast()
return self._fsproxy_wrapped
@nvtx.annotate(
"COPY_FAST_TO_SLOW",
color=_CUDF_PANDAS_NVTX_COLORS["COPY_FAST_TO_SLOW"],
domain="cudf_pandas",
)
def _fsproxy_fast_to_slow(self):
if self._fsproxy_state is _State.FAST:
return super(type(self), self)._fsproxy_fast_to_slow()
return self._fsproxy_wrapped
slow_dir = dir(slow_type)
cls_dict = {
"__init__": __init__,
"__doc__": inspect.getdoc(slow_type),
"_fsproxy_slow_dir": slow_dir,
"_fsproxy_fast_type": fast_type,
"_fsproxy_slow_type": slow_type,
"_fsproxy_slow_to_fast": _fsproxy_slow_to_fast,
"_fsproxy_fast_to_slow": _fsproxy_fast_to_slow,
"_fsproxy_state": _fsproxy_state,
}
for method in _SPECIAL_METHODS:
if getattr(slow_type, method, False):
cls_dict[method] = _FastSlowAttribute(method)
cls = types.new_class(
name,
(_IntermediateProxy,),
{"metaclass": _FastSlowProxyMeta},
lambda ns: ns.update(cls_dict),
)
functools.update_wrapper(
cls,
slow_type,
assigned=_WRAPPER_ASSIGNMENTS,
updated=(),
)
cls.__module__ = module if module is not None else callers_module_name()
intermediate_type_map = get_intermediate_type_map()
if fast_type is not _Unusable:
intermediate_type_map[fast_type] = cls
intermediate_type_map[slow_type] = cls
return cls
def register_proxy_func(slow_func: Callable):
"""
Decorator to register custom function as a proxy for slow_func.
Parameters
----------
slow_func: Callable
The function to register a wrapper for.
Returns
-------
Callable
"""
def wrapper(func):
registered_functions = get_registered_functions()
registered_functions[slow_func] = func
functools.update_wrapper(func, slow_func)
return func
return wrapper
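# Illustrative sketch (comments only) of registering a custom wrapper for a
# slow free function; the body shown is a hypothetical placeholder.
#
#     import pandas as pd
#
#     @register_proxy_func(pd.read_csv)
#     def read_csv(*args, **kwargs):
#         ...  # dispatch to a fast implementation, falling back as needed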
@functools.lru_cache(maxsize=None)
def get_final_type_map():
"""
Return the mapping of all known fast and slow final types to their
corresponding proxy types.
"""
return dict()
@functools.lru_cache(maxsize=None)
def get_intermediate_type_map():
"""
Return a mapping of all known fast and slow intermediate types to their
corresponding proxy types.
"""
return dict()
@functools.lru_cache(maxsize=None)
def get_registered_functions():
return dict()
def _raise_attribute_error(obj, name):
"""
Raise an AttributeError with a message that is consistent with
the error raised by Python for a non-existent attribute on a
proxy object.
"""
raise AttributeError(f"'{obj}' object has no attribute '{name}'")
class _FastSlowAttribute:
"""
A descriptor type used to define attributes of fast-slow proxies.
"""
def __init__(self, name: str):
self._name = name
def __get__(self, obj, owner=None) -> Any:
if obj is None:
# class attribute
obj = owner
if not (
isinstance(obj, _FastSlowProxy)
or issubclass(type(obj), _FastSlowProxyMeta)
):
# we only want to look up attributes on the underlying
# fast/slow objects for instances of _FastSlowProxy or
# subtypes of _FastSlowProxyMeta:
_raise_attribute_error(owner if owner else obj, self._name)
result, _ = _fast_slow_function_call(getattr, obj, self._name)
if isinstance(result, functools.cached_property):
# TODO: temporary workaround until dask is able
# to correctly inspect cached_property objects.
# GH: 264
result = property(result.func)
if isinstance(result, (_MethodProxy, property)):
from .module_accelerator import disable_module_accelerator
type_ = owner if owner else type(obj)
slow_result_type = getattr(type_._fsproxy_slow, self._name)
with disable_module_accelerator():
result.__doc__ = inspect.getdoc( # type: ignore
slow_result_type
)
if isinstance(result, _MethodProxy):
# Note that this will produce the wrong result for bound
# methods because dir for the method won't be the same as for
# the pure unbound function, but the alternative is
# materializing the slow object when we don't really want to.
result._fsproxy_slow_dir = dir(
slow_result_type
) # type: ignore
return result
class _FastSlowProxyMeta(type):
"""
Metaclass used to dynamically find class attributes and
classmethods of fast-slow proxy types.
"""
@property
def _fsproxy_slow(self) -> type:
return self._fsproxy_slow_type
@property
def _fsproxy_fast(self) -> type:
return self._fsproxy_fast_type
def __dir__(self):
# Try to return the cached dir of the slow object, but if it
# doesn't exist, fall back to the default implementation.
try:
return self._fsproxy_slow_dir
except AttributeError:
return type.__dir__(self)
def __getattr__(self, name: str) -> Any:
if name.startswith("_fsproxy") or name.startswith("__"):
# an AttributeError was raised when trying to evaluate
# an internal attribute, we just need to propagate this
_raise_attribute_error(self.__class__.__name__, name)
attr = _FastSlowAttribute(name)
return attr.__get__(None, owner=self)
def __subclasscheck__(self, __subclass: type) -> bool:
if super().__subclasscheck__(__subclass):
return True
if hasattr(__subclass, "_fsproxy_slow"):
return issubclass(__subclass._fsproxy_slow, self._fsproxy_slow)
return False
def __instancecheck__(self, __instance: Any) -> bool:
if super().__instancecheck__(__instance):
return True
elif hasattr(type(__instance), "_fsproxy_slow"):
return issubclass(type(__instance), self)
return False
class _FastSlowProxy:
"""
    Base class for all fast-slow proxy types.
    A fast-slow proxy is a proxy for a pair of types that provide "fast"
and "slow" implementations of the same API. At any time, a
fast-slow proxy wraps an object of either "fast" type, or "slow"
type. Operations invoked on the fast-slow proxy are first
delegated to the "fast" type, and if that fails, to the "slow"
type.
"""
_fsproxy_wrapped: Any
def _fsproxy_fast_to_slow(self) -> Any:
"""
If the wrapped object is of "fast" type, returns the
corresponding "slow" object. Otherwise, returns the wrapped
object as-is.
"""
raise NotImplementedError("Abstract base class")
def _fsproxy_slow_to_fast(self) -> Any:
"""
If the wrapped object is of "slow" type, returns the
corresponding "fast" object. Otherwise, returns the wrapped
object as-is.
"""
raise NotImplementedError("Abstract base class")
@property
def _fsproxy_fast(self) -> Any:
"""
Returns the wrapped object. If the wrapped object is of "slow"
type, replaces it with the corresponding "fast" object before
returning it.
"""
self._fsproxy_wrapped = self._fsproxy_slow_to_fast()
return self._fsproxy_wrapped
@property
def _fsproxy_slow(self) -> Any:
"""
Returns the wrapped object. If the wrapped object is of "fast"
type, replaces it with the corresponding "slow" object before
returning it.
"""
self._fsproxy_wrapped = self._fsproxy_fast_to_slow()
return self._fsproxy_wrapped
def __dir__(self):
# Try to return the cached dir of the slow object, but if it
# doesn't exist, fall back to the default implementation.
try:
return self._fsproxy_slow_dir
except AttributeError:
return object.__dir__(self)
def __getattr__(self, name: str) -> Any:
if name.startswith("_fsproxy"):
# an AttributeError was raised when trying to evaluate
# an internal attribute, we just need to propagate this
_raise_attribute_error(self.__class__.__name__, name)
if name in {
"_ipython_canary_method_should_not_exist_",
"_ipython_display_",
"_repr_mimebundle_",
# Workaround for https://github.com/numpy/numpy/issues/5350
# see GH:216 for details
"__array_struct__",
}:
# IPython always looks for these names in its display
# logic. See #GH:70 and #GH:172 for more details but the
# gist is that not raising an AttributeError immediately
# results in slow display in IPython (since the fast
# object will be copied to the slow one to look for
# attributes there which then also won't exist).
# This is somewhat delicate to the order in which IPython
# implements special display fallbacks.
_raise_attribute_error(self.__class__.__name__, name)
if name.startswith("_"):
# private attributes always come from `._fsproxy_slow`:
return getattr(self._fsproxy_slow, name)
attr = _FastSlowAttribute(name)
return attr.__get__(self)
def __setattr__(self, name, value):
if name.startswith("_"):
object.__setattr__(self, name, value)
return
return _FastSlowAttribute("__setattr__").__get__(self)(name, value)
def __add__(self, other):
return _fast_slow_function_call(operator.add, self, other)[0]
def __radd__(self, other):
return _fast_slow_function_call(operator.add, other, self)[0]
def __sub__(self, other):
return _fast_slow_function_call(operator.sub, self, other)[0]
def __rsub__(self, other):
return _fast_slow_function_call(operator.sub, other, self)[0]
def __mul__(self, other):
return _fast_slow_function_call(operator.mul, self, other)[0]
def __rmul__(self, other):
return _fast_slow_function_call(operator.mul, other, self)[0]
def __truediv__(self, other):
return _fast_slow_function_call(operator.truediv, self, other)[0]
def __rtruediv__(self, other):
return _fast_slow_function_call(operator.truediv, other, self)[0]
def __floordiv__(self, other):
return _fast_slow_function_call(operator.floordiv, self, other)[0]
def __rfloordiv__(self, other):
return _fast_slow_function_call(operator.floordiv, other, self)[0]
def __mod__(self, other):
return _fast_slow_function_call(operator.mod, self, other)[0]
def __rmod__(self, other):
return _fast_slow_function_call(operator.mod, other, self)[0]
def __divmod__(self, other):
return _fast_slow_function_call(divmod, self, other)[0]
def __rdivmod__(self, other):
return _fast_slow_function_call(divmod, other, self)[0]
def __pow__(self, other):
return _fast_slow_function_call(operator.pow, self, other)[0]
def __rpow__(self, other):
return _fast_slow_function_call(operator.pow, other, self)[0]
def __lshift__(self, other):
return _fast_slow_function_call(operator.lshift, self, other)[0]
def __rlshift__(self, other):
return _fast_slow_function_call(operator.lshift, other, self)[0]
def __rshift__(self, other):
return _fast_slow_function_call(operator.rshift, self, other)[0]
def __rrshift__(self, other):
return _fast_slow_function_call(operator.rshift, other, self)[0]
def __and__(self, other):
return _fast_slow_function_call(operator.and_, self, other)[0]
def __rand__(self, other):
return _fast_slow_function_call(operator.and_, other, self)[0]
def __xor__(self, other):
return _fast_slow_function_call(operator.xor, self, other)[0]
def __rxor__(self, other):
return _fast_slow_function_call(operator.xor, other, self)[0]
def __or__(self, other):
return _fast_slow_function_call(operator.or_, self, other)[0]
def __ror__(self, other):
return _fast_slow_function_call(operator.or_, other, self)[0]
def __matmul__(self, other):
return _fast_slow_function_call(operator.matmul, self, other)[0]
def __rmatmul__(self, other):
return _fast_slow_function_call(operator.matmul, other, self)[0]
class _FinalProxy(_FastSlowProxy):
"""
Proxy type for a pair of fast and slow "final" types for which
there is a known conversion from fast to slow, and vice-versa.
The conversion between fast and slow types is done using
user-provided conversion functions.
Do not attempt to use this class directly. Instead, use
`make_final_proxy_type` to create subtypes.
"""
@classmethod
def _fsproxy_wrap(cls, value, func):
"""Default mechanism to wrap a value in a proxy type
Parameters
----------
cls
The proxy type
value
The value to wrap up
func
The function called that constructed value
Returns
-------
A new proxied object
Notes
-----
_FinalProxy subclasses can override this classmethod if they
need particular behaviour when wrapped up.
"""
proxy = object.__new__(cls)
proxy._fsproxy_wrapped = value
return proxy
class _IntermediateProxy(_FastSlowProxy):
"""
Proxy type for a pair of "intermediate" types that appear as
intermediate values when invoking operations on "final" types.
The conversion between fast and slow types is done by keeping
track of the sequence of operations that created the wrapped
object, and "playing back" that sequence starting from the "slow"
version of the originating _FinalProxy.
Do not attempt to use this class directly. Instead, use
`make_intermediate_proxy_type` to create subtypes.
"""
_method_chain: Tuple[Callable, Tuple, Dict]
@classmethod
def _fsproxy_wrap(
cls,
obj: Any,
method_chain: Tuple[Callable, Tuple, Dict],
):
"""
Parameters
----------
obj: The object to wrap
method_chain: A tuple of the form (func, args, kwargs) where
`func` is the function that was called to create `obj`,
and `args` and `kwargs` are the arguments that were passed
to `func`.
"""
proxy = object.__new__(cls)
proxy._fsproxy_wrapped = obj
proxy._method_chain = method_chain
return proxy
@nvtx.annotate(
"COPY_SLOW_TO_FAST",
color=_CUDF_PANDAS_NVTX_COLORS["COPY_SLOW_TO_FAST"],
domain="cudf_pandas",
)
def _fsproxy_slow_to_fast(self) -> Any:
func, args, kwargs = self._method_chain
args, kwargs = _fast_arg(args), _fast_arg(kwargs)
return func(*args, **kwargs)
@nvtx.annotate(
"COPY_FAST_TO_SLOW",
color=_CUDF_PANDAS_NVTX_COLORS["COPY_FAST_TO_SLOW"],
domain="cudf_pandas",
)
def _fsproxy_fast_to_slow(self) -> Any:
func, args, kwargs = self._method_chain
args, kwargs = _slow_arg(args), _slow_arg(kwargs)
return func(*args, **kwargs)
class _CallableProxyMixin:
"""
Mixin class that implements __call__ for fast-slow proxies.
"""
# For wrapped callables isinstance(self, FunctionType) should return True
__class__ = types.FunctionType # type: ignore
def __call__(self, *args, **kwargs) -> Any:
result, _ = _fast_slow_function_call(
# We cannot directly call self here because we need it to be
# converted into either the fast or slow object (by
# _fast_slow_function_call) to avoid infinite recursion.
# TODO: When Python 3.11 is the minimum supported Python version
# this can use operator.call
lambda fn, args, kwargs: fn(*args, **kwargs),
self,
args,
kwargs,
)
return result
class _FunctionProxy(_CallableProxyMixin):
"""
Proxy for a pair of fast and slow functions.
"""
__name__: str
def __init__(self, fast: Callable | _Unusable, slow: Callable):
self._fsproxy_fast = fast
self._fsproxy_slow = slow
functools.update_wrapper(self, slow)
class _MethodProxy(_CallableProxyMixin, _IntermediateProxy):
"""
Methods of fast-slow proxies are of type _MethodProxy.
"""
def _fast_slow_function_call(func: Callable, /, *args, **kwargs) -> Any:
"""
Call `func` with all `args` and `kwargs` converted to their
respective fast type. If that fails, call `func` with all
`args` and `kwargs` converted to their slow type.
Wrap the result in a fast-slow proxy if it is a type we know how
to wrap.
"""
from .module_accelerator import disable_module_accelerator
fast = False
try:
with nvtx.annotate(
"EXECUTE_FAST",
color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_FAST"],
domain="cudf_pandas",
):
fast_args, fast_kwargs = _fast_arg(args), _fast_arg(kwargs)
result = func(*fast_args, **fast_kwargs)
if result is NotImplemented:
# try slow path
raise Exception()
fast = True
except Exception:
with nvtx.annotate(
"EXECUTE_SLOW",
color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
domain="cudf_pandas",
):
slow_args, slow_kwargs = _slow_arg(args), _slow_arg(kwargs)
with disable_module_accelerator():
result = func(*slow_args, **slow_kwargs)
return _maybe_wrap_result(result, func, *args, **kwargs), fast
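# Illustrative sketch (comments only) of the dispatch performed above: the
# call is first attempted with "fast" arguments and, on any exception or a
# NotImplemented result, replayed with "slow" arguments.
#
#     result, ran_fast = _fast_slow_function_call(operator.add, proxy_a, proxy_b)
#
# Here `proxy_a`/`proxy_b` stand for fast-slow proxy objects and `ran_fast`
# reports whether the fast path succeeded.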
def _transform_arg(
arg: Any,
attribute_name: Literal["_fsproxy_slow", "_fsproxy_fast"],
seen: Set[int],
) -> Any:
"""
Transform "arg" into its corresponding slow (or fast) type.
"""
import numpy as np
if isinstance(arg, (_FastSlowProxy, _FastSlowProxyMeta, _FunctionProxy)):
typ = getattr(arg, attribute_name)
if typ is _Unusable:
raise Exception("Cannot transform _Unusable")
return typ
elif isinstance(arg, types.ModuleType) and attribute_name in arg.__dict__:
return arg.__dict__[attribute_name]
elif isinstance(arg, list):
return type(arg)(_transform_arg(a, attribute_name, seen) for a in arg)
elif isinstance(arg, tuple):
# This attempts to handle arbitrary subclasses of tuple by
# assuming that if you've subclassed tuple with some special
# behaviour you'll also make the object pickleable by
# implementing the custom pickle protocol interface (either
# __getnewargs_ex__ or __getnewargs__). Perhaps this should
# use __reduce_ex__ instead...
if type(arg) is tuple:
# Must come first to avoid infinite recursion
return tuple(_transform_arg(a, attribute_name, seen) for a in arg)
elif hasattr(arg, "__getnewargs_ex__"):
# Partially reimplement the pickle construction protocol so the
# object can be rebuilt from its transformed pieces
# This handles scipy._lib._bunch._make_tuple_bunch
args, kwargs = (
_transform_arg(a, attribute_name, seen)
for a in arg.__getnewargs_ex__()
)
obj = type(arg).__new__(type(arg), *args, **kwargs)
if hasattr(obj, "__setstate__"):
raise NotImplementedError(
"Transforming tuple-like with __getnewargs_ex__ and "
"__setstate__ not implemented"
)
if not hasattr(obj, "__dict__") and kwargs:
raise NotImplementedError(
"Transforming tuple-like with kwargs from "
"__getnewargs_ex__ and no __dict__ not implemented"
)
obj.__dict__.update(kwargs)
return obj
elif hasattr(arg, "__getnewargs__"):
# This handles namedtuple, and would catch tuple if we
# didn't handle it above.
args = _transform_arg(arg.__getnewargs__(), attribute_name, seen)
return type(arg).__new__(type(arg), *args)
else:
# Hope we can just call the constructor with transformed entries.
return type(arg)(
_transform_arg(a, attribute_name, seen) for a in arg
)
elif isinstance(arg, dict):
return {
_transform_arg(k, attribute_name, seen): _transform_arg(
a, attribute_name, seen
)
for k, a in arg.items()
}
elif isinstance(arg, np.ndarray) and arg.dtype == "O":
transformed = [
_transform_arg(a, attribute_name, seen) for a in arg.flat
]
# Keep the same memory layout as arg (the default is C_CONTIGUOUS)
if arg.flags["F_CONTIGUOUS"] and not arg.flags["C_CONTIGUOUS"]:
order = "F"
else:
order = "C"
result = np.empty(int(np.prod(arg.shape)), dtype=object, order=order)
result[...] = transformed
return result.reshape(arg.shape)
elif isinstance(arg, Iterator) and attribute_name == "_fsproxy_fast":
# this may include consumable objects like generators or
# IOBase objects, which we must not consume here and leave
# unavailable to the slow path in case of fallback. So, we raise
# here to ensure the slow path is taken:
raise Exception()
elif isinstance(arg, types.FunctionType):
if id(arg) in seen:
# `arg` is mutually recursive with another function. We
# can't handle these cases yet:
return arg
seen.add(id(arg))
return _replace_closurevars(arg, attribute_name, seen)
else:
return arg
def _fast_arg(arg: Any) -> Any:
"""
Transform "arg" into its corresponding fast type.
"""
seen: Set[int] = set()
return _transform_arg(arg, "_fsproxy_fast", seen)
def _slow_arg(arg: Any) -> Any:
"""
Transform "arg" into its corresponding slow type.
"""
seen: Set[int] = set()
return _transform_arg(arg, "_fsproxy_slow", seen)
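# Illustrative sketch (added for exposition, not part of the library):
# _fast_arg/_slow_arg walk nested containers and swap every proxy they find
# for its fast (cudf) or slow (pandas) counterpart, leaving plain values
# untouched. The helper is hypothetical and never called; `proxy_df` is
# assumed to be a wrapped DataFrame proxy.
def _example_arg_transformation(proxy_df):  # pragma: no cover
    nested = {"frames": [proxy_df, proxy_df], "scale": 2}
    slow = _slow_arg(nested)
    # slow["frames"] now holds the concrete pandas DataFrames, while
    # slow["scale"] is still the untouched integer 2.
    return slow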
def _maybe_wrap_result(result: Any, func: Callable, /, *args, **kwargs) -> Any:
"""
Wraps "result" in a fast-slow proxy if is a "proxiable" object.
"""
if _is_final_type(result):
typ = get_final_type_map()[type(result)]
return typ._fsproxy_wrap(result, func)
elif _is_intermediate_type(result):
typ = get_intermediate_type_map()[type(result)]
return typ._fsproxy_wrap(result, method_chain=(func, args, kwargs))
elif _is_final_class(result):
return get_final_type_map()[result]
elif isinstance(result, list):
return type(result)(
[
_maybe_wrap_result(r, operator.getitem, result, i)
for i, r in enumerate(result)
]
)
elif isinstance(result, tuple):
wrapped = (
_maybe_wrap_result(r, operator.getitem, result, i)
for i, r in enumerate(result)
)
if hasattr(result, "_make"):
# namedtuple
return type(result)._make(wrapped)
else:
return type(result)(wrapped)
elif isinstance(result, Iterator):
return (_maybe_wrap_result(r, lambda x: x, r) for r in result)
elif _is_function_or_method(result):
return _MethodProxy._fsproxy_wrap(
result, method_chain=(func, args, kwargs)
)
else:
return result
def _is_final_type(result: Any) -> bool:
return type(result) in get_final_type_map()
def _is_final_class(result: Any) -> bool:
if not isinstance(result, type):
return False
return result in get_final_type_map()
def _is_intermediate_type(result: Any) -> bool:
return type(result) in get_intermediate_type_map()
def _is_function_or_method(obj: Any) -> bool:
return isinstance(
obj,
(
types.FunctionType,
types.BuiltinFunctionType,
types.MethodType,
types.WrapperDescriptorType,
types.MethodWrapperType,
types.MethodDescriptorType,
types.BuiltinMethodType,
),
)
def _replace_closurevars(
f: types.FunctionType,
attribute_name: Literal["_fsproxy_slow", "_fsproxy_fast"],
seen: Set[int],
) -> types.FunctionType:
"""
Return a copy of `f` with its closure variables replaced with
their corresponding slow (or fast) types.
"""
if f.__closure__:
# GH #254: If empty cells are present - which can happen in
# situations like when `f` is a method that invokes the
# "empty" `super()` - the call to `getclosurevars` below will
# fail. For now, we just return `f` in this case. If needed,
# we can consider populating empty cells with a placeholder
# value to allow the call to `getclosurevars` to succeed.
if any(c == types.CellType() for c in f.__closure__):
return f
f_nonlocals, f_globals, f_builtins, _ = inspect.getclosurevars(f)
g_globals = _transform_arg(f_globals, attribute_name, seen)
g_nonlocals = _transform_arg(f_nonlocals, attribute_name, seen)
# if none of the globals/nonlocals were transformed, we
# can just return f:
if all(f_globals[k] is g_globals[k] for k in f_globals) and all(
g_nonlocals[k] is f_nonlocals[k] for k in f_nonlocals
):
return f
g_closure = tuple(types.CellType(val) for val in g_nonlocals.values())
g_globals["__builtins__"] = f_builtins
g = types.FunctionType(
f.__code__,
g_globals,
name=f.__name__,
argdefs=f.__defaults__,
closure=g_closure,
)
g = functools.update_wrapper(
g,
f,
assigned=functools.WRAPPER_ASSIGNMENTS + ("__kwdefaults__",),
)
return g
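# Illustrative sketch (added for exposition, not part of the library): a
# user callback that closes over a proxy is copied so that, on the chosen
# path, it sees the concrete object instead of the proxy. The helper is
# hypothetical and never called; `proxy_series` is assumed to be a wrapped
# Series proxy.
def _example_closure_replacement(proxy_series):  # pragma: no cover
    def user_callback(x):
        # `proxy_series` is a closure variable here; the call below returns
        # a copy of this function whose closure cell holds the slow
        # (pandas) Series instead of the proxy.
        return x + proxy_series.sum()

    return _replace_closurevars(user_callback, "_fsproxy_slow", set())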
_SPECIAL_METHODS: Set[str] = {
"__repr__",
"__str__",
"__len__",
"__contains__",
"__getitem__",
"__setitem__",
"__delitem__",
"__getslice__",
"__setslice__",
"__delslice__",
"__iter__",
"__lt__",
"__le__",
"__eq__",
"__ne__",
"__gt__",
"__ge__",
"__pos__",
"__neg__",
"__invert__",
"__abs__",
"__round__",
"__format__",
"__bool__",
"__float__",
"__int__",
"__complex__",
"__enter__",
"__exit__",
"__next__",
"__copy__",
"__deepcopy__",
"__dataframe__",
"__call__",
# Added on a per-proxy basis
# https://github.com/rapidsai/xdf/pull/306#pullrequestreview-1636155428
# "__hash__",
}
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/__init__.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from .magics import load_ipython_extension
from .profiler import Profiler
__all__ = ["Profiler", "load_ipython_extension", "install"]
LOADED = False
def install():
"""Enable Pandas Accelerator Mode."""
from .module_accelerator import ModuleAccelerator
loader = ModuleAccelerator.install("pandas", "cudf", "pandas")
global LOADED
LOADED = loader is not None
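# Illustrative usage sketch (added for exposition): typical ways to enable
# the accelerator. The commands and import ordering shown are assumptions
# about how this package is meant to be used, not executable code here.
#
#   From the command line (see cudf.pandas.__main__):
#       python -m cudf.pandas my_script.py
#   In IPython/Jupyter, via the extension imported above:
#       %load_ext cudf.pandas
#   Programmatically, before pandas is imported:
#       import cudf.pandas
#       cudf.pandas.install()
#       import pandas as pd  # subsequent pandas use is accelerated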
def pytest_load_initial_conftests(early_config, parser, args):
# We need to install ourselves before any conftest.py is imported (which
# might import pandas). This hook is guaranteed to run before that
# happens; see
# https://docs.pytest.org/en/7.1.x/reference/\
# reference.html#pytest.hookspec.pytest_load_initial_conftests
try:
install()
except RuntimeError:
raise RuntimeError(
"An existing plugin has already loaded pandas. Interposing failed."
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/__main__.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Usage:
python -m cudf.pandas <script.py> <args>
python -m cudf.pandas -m module <args>
"""
import argparse
import runpy
import sys
import tempfile
from contextlib import contextmanager
from . import install
from .profiler import Profiler, lines_with_profiling
@contextmanager
def profile(function_profile, line_profile, fn):
if line_profile:
with open(fn) as f:
lines = f.readlines()
with tempfile.NamedTemporaryFile(mode="w+b", suffix=".py") as f:
f.write(lines_with_profiling(lines, function_profile).encode())
f.seek(0)
yield f.name
elif function_profile:
with Profiler() as profiler:
yield fn
profiler.print_per_func_stats()
else:
yield fn
def main():
parser = argparse.ArgumentParser(
prog="python -m cudf.pandas",
description=(
"Run a Python script with Pandas Accelerator Mode enabled. "
"In Pandas Accelerator Mode, all imports of pandas will "
"automatically use GPU accelerated cuDF equivalents where "
"possible."
),
)
parser.add_argument(
"-m",
dest="module",
nargs=1,
)
parser.add_argument(
"--profile",
action="store_true",
help="Perform per-function profiling of this script.",
)
parser.add_argument(
"--line-profile",
action="store_true",
help="Perform per-line profiling of this script.",
)
parser.add_argument(
"args",
nargs=argparse.REMAINDER,
help="Arguments to pass on to the script",
)
args = parser.parse_args()
install()
with profile(args.profile, args.line_profile, args.args[0]) as fn:
args.args[0] = fn
if args.module:
(module,) = args.module
# run the module passing the remaining arguments
# as if it were run with python -m <module> <args>
sys.argv[:] = [module] + args.args # not thread safe?
runpy.run_module(module, run_name="__main__")
elif len(args.args) >= 1:
# Remove ourself from argv and continue
sys.argv[:] = args.args
runpy.run_path(args.args[0], run_name="__main__")
if __name__ == "__main__":
main()
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/pandas
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/_wrappers/common.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Utility custom overrides for special methods/properties
from ..fast_slow_proxy import (
_FastSlowAttribute,
_FastSlowProxy,
_maybe_wrap_result,
_slow_arg,
)
def array_method(self: _FastSlowProxy, *args, **kwargs):
return self._fsproxy_slow.__array__(*args, **kwargs)
def array_function_method(self, func, types, args, kwargs):
try:
return _FastSlowAttribute("__array_function__").__get__(self)(
func, types, args, kwargs
)
except Exception:
# if something went wrong with __array_function__ we
# attempt to call the function directly on the slow
# object. This ensures that the function call is
# handled in the same way as if the slow object was
# passed directly to the function.
slow_args, slow_kwargs = _slow_arg(args), _slow_arg(kwargs)
return _maybe_wrap_result(
func(*slow_args, **slow_kwargs), func, *args, **kwargs
)
def arrow_array_method(self: _FastSlowProxy, *args, **kwargs):
import pyarrow as pa
try:
return self._fsproxy_fast.to_arrow(*args, **kwargs)
except Exception:
return pa.array(self._fsproxy_slow, *args, **kwargs)
@property # type: ignore
def cuda_array_interface(self: _FastSlowProxy):
return self._fsproxy_fast.__cuda_array_interface__
def custom_iter(self: _FastSlowProxy):
return iter(self._fsproxy_slow)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/pandas
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/_wrappers/numpy.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import cupy
import cupy._core.flags
import numpy
import numpy.core.multiarray
from ..fast_slow_proxy import (
make_final_proxy_type,
make_intermediate_proxy_type,
)
from .common import (
array_method,
arrow_array_method,
cuda_array_interface,
custom_iter,
)
# https://docs.cupy.dev/en/stable/reference/creation.html
_CONSTRUCTORS = frozenset(
[
cupy.empty,
cupy.empty_like,
cupy.eye,
cupy.identity,
cupy.ones,
cupy.ones_like,
cupy.zeros,
cupy.zeros_like,
cupy.full,
cupy.full_like,
cupy.array,
cupy.asarray,
cupy.asanyarray,
cupy.ascontiguousarray,
cupy.copy,
cupy.frombuffer,
cupy.fromfile,
cupy.fromfunction,
cupy.fromiter,
cupy.fromstring,
cupy.loadtxt,
cupy.arange,
cupy.linspace,
cupy.logspace,
cupy.meshgrid,
cupy.diag,
cupy.diagflat,
cupy.tri,
cupy.tril,
cupy.triu,
cupy.vander,
]
)
def wrap_ndarray(cls, arr: cupy.ndarray | numpy.ndarray, constructor):
"""Wrap an ndarray in a proxy type
Parameters
----------
cls
Proxy type for ndarray
arr
Concrete result ndarray (cupy or numpy)
constructor
Function that was called to construct the concrete array, used
to check against a denylist to avoid unwrapping.
Returns
-------
The scalar .item() wrapped in its numpy dtype if arr is a
zero-dimensional cupy array (and wasn't just constructed as such),
a new proxy type otherwise.
Notes
-----
Axis-reducing operations in numpy return scalar objects but
zero-dimensional arrays in cupy. This confuses downstream
libraries when they get a fast (device-based) zero-dim array when
they were expecting a scalar. To avoid this, if the provided array
is a cupy array, and its shape is zero, unwrap it.
"""
if (
isinstance(arr, cupy.ndarray)
and arr.shape == ()
and constructor not in _CONSTRUCTORS
):
return arr.dtype.type(arr.item())
else:
# Note, this super call means that the constructed ndarray
# class cannot be subclassed (because then super(cls,
# cls)._fsproxy_wrap produces an infinite loop). Really this
# should be super(ndarray, cls), but we don't have access to
# the ndarray type until after we need to pass this function
# in. So it works for now since without subclassing,
# super(ndarray, cls) == super(ndarray, ndarray) == super(cls,
# cls)
return super(cls, cls)._fsproxy_wrap(arr, constructor)
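# Illustrative sketch (added for exposition, not part of the library): the
# special-casing above means reduction results come back as host scalars
# while explicitly constructed arrays stay proxied. The helper is
# hypothetical, never called, and would require a GPU to run; `ndarray` is
# the proxy type defined just below.
def _example_wrap_ndarray_behaviour():  # pragma: no cover
    reduced = cupy.asarray([1.0, 2.0, 3.0]).sum()  # zero-dim cupy array
    scalar = wrap_ndarray(ndarray, reduced, constructor=None)  # numpy scalar
    proxied = ndarray._fsproxy_wrap(cupy.zeros(3), cupy.zeros)  # array proxy
    return scalar, proxied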
ndarray = make_final_proxy_type(
"ndarray",
cupy.ndarray,
numpy.ndarray,
fast_to_slow=cupy.ndarray.get,
slow_to_fast=cupy.asarray,
additional_attributes={
"__array__": array_method,
# So that pa.array(wrapped-numpy-array) works
"__arrow_array__": arrow_array_method,
"__cuda_array_interface__": cuda_array_interface,
# ndarrays are unhashable
"__hash__": None,
# iter(cupy-array) produces an iterable of zero-dim device
# arrays, which is not usable in many settings (whereas
# iter(numpy-array) produces an iterable of scalars)
"__iter__": custom_iter,
# Special wrapping to handle scalar values
"_fsproxy_wrap": classmethod(wrap_ndarray),
},
)
# Mapping flags between slow and fast types
_ndarray_flags = make_intermediate_proxy_type(
"_ndarray_flags",
cupy._core.flags.Flags,
numpy.core.multiarray.flagsobj,
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/pandas
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/_wrappers/__init__.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
from . import numpy, pandas
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/pandas
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/_wrappers/pandas.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import sys
import pandas as pd
import cudf
from ..annotation import nvtx
from ..fast_slow_proxy import (
_CUDF_PANDAS_NVTX_COLORS,
_DELETE,
_fast_slow_function_call,
_FastSlowAttribute,
_FunctionProxy,
_Unusable,
get_final_type_map,
make_final_proxy_type as _make_final_proxy_type,
make_intermediate_proxy_type as _make_intermediate_proxy_type,
register_proxy_func,
)
from .common import (
array_function_method,
array_method,
arrow_array_method,
cuda_array_interface,
custom_iter,
)
from pandas.io.sas.sas7bdat import ( # isort: skip
SAS7BDATReader as pd_SAS7BDATReader,
)
from pandas.io.sas.sas_xport import ( # isort: skip
XportReader as pd_XportReader,
)
# TODO(pandas2.1): Can import from pandas.api.typing
from pandas.core.resample import ( # isort: skip
Resampler as pd_Resampler,
TimeGrouper as pd_TimeGrouper,
)
cudf.set_option("mode.pandas_compatible", True)
def make_final_proxy_type(
name,
fast_type,
slow_type,
**kwargs,
):
assert "module" not in kwargs
return _make_final_proxy_type(
name, fast_type, slow_type, module=slow_type.__module__, **kwargs
)
def make_intermediate_proxy_type(name, fast_type, slow_type):
return _make_intermediate_proxy_type(
name, fast_type, slow_type, module=slow_type.__module__
)
class _AccessorAttr:
"""
Descriptor that ensures that accessors like `.dt` and `.str`
return the corresponding accessor types when accessed on `Series`
and `Index` _types_ (not instances).
Attribute access for _instances_ uses the regular fast-then-slow
lookup defined in `__getattr__`.
"""
def __init__(self, typ):
self.__typ = typ
def __get__(self, obj, cls=None):
if obj is None:
return self.__typ
else:
# allow __getattr__ to handle this
raise AttributeError()
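# Illustrative sketch (added for exposition, not part of the library): on
# the proxy *class* the descriptor returns the accessor proxy type, while
# on an *instance* it raises so the regular fast-then-slow __getattr__
# lookup runs. The helper is hypothetical and never called; `Series` is the
# proxy type defined later in this module and `proxy_series` is assumed to
# be a wrapped Series of strings.
def _example_accessor_attr(proxy_series):  # pragma: no cover
    accessor_type = Series.str          # the StringMethods proxy type
    upper = proxy_series.str.upper()    # instance access, dispatched normally
    return accessor_type, upper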
DatetimeProperties = make_intermediate_proxy_type(
"DatetimeProperties",
cudf.core.series.DatetimeProperties,
pd.core.indexes.accessors.DatetimeProperties,
)
TimedeltaProperties = make_intermediate_proxy_type(
"TimedeltaProperties",
cudf.core.series.TimedeltaProperties,
pd.core.indexes.accessors.TimedeltaProperties,
)
CombinedDatetimelikeProperties = make_intermediate_proxy_type(
"CombinedDatetimelikeProperties",
cudf.core.series.DatetimeProperties,
pd.core.indexes.accessors.CombinedDatetimelikeProperties,
)
StringMethods = make_intermediate_proxy_type(
"StringMethods",
cudf.core.column.string.StringMethods,
pd.core.strings.accessor.StringMethods,
)
_CategoricalAccessor = make_intermediate_proxy_type(
"CategoricalAccessor",
cudf.core.column.categorical.CategoricalAccessor,
pd.core.arrays.categorical.CategoricalAccessor,
)
def _DataFrame__dir__(self):
# Column names that are string identifiers are added to the dir of the
# DataFrame
# See https://github.com/pandas-dev/pandas/blob/43691a2f5d235b08f0f3aa813d8fdcb7c4ce1e47/pandas/core/indexes/base.py#L878 # noqa: E501
_pd_df_dir = dir(pd.DataFrame)
return _pd_df_dir + [
colname
for colname in self.columns
if isinstance(colname, str) and colname.isidentifier()
]
DataFrame = make_final_proxy_type(
"DataFrame",
cudf.DataFrame,
pd.DataFrame,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={
"__array__": array_method,
"__dir__": _DataFrame__dir__,
"_constructor": _FastSlowAttribute("_constructor"),
"_constructor_sliced": _FastSlowAttribute("_constructor_sliced"),
},
)
Series = make_final_proxy_type(
"Series",
cudf.Series,
pd.Series,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={
"__array__": array_method,
"__array_function__": array_function_method,
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
"__arrow_array__": arrow_array_method,
"__cuda_array_interface__": cuda_array_interface,
"__iter__": custom_iter,
"dt": _AccessorAttr(DatetimeProperties),
"str": _AccessorAttr(StringMethods),
"cat": _AccessorAttr(_CategoricalAccessor),
"_constructor": _FastSlowAttribute("_constructor"),
"_constructor_expanddim": _FastSlowAttribute("_constructor_expanddim"),
},
)
def Index__new__(cls, *args, **kwargs):
# Call fast/slow constructor
# This takes care of running __init__ as well, but must be paired
# with a removal of the defaulted __init__ that
# make_final_proxy_type provides.
self, _ = _fast_slow_function_call(
lambda cls, args, kwargs: cls(*args, **kwargs),
cls,
args,
kwargs,
)
return self
Index = make_final_proxy_type(
"Index",
cudf.Index,
pd.Index,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={
"__array__": array_method,
"__array_function__": array_function_method,
"__arrow_array__": arrow_array_method,
"__cuda_array_interface__": cuda_array_interface,
"dt": _AccessorAttr(DatetimeProperties),
"str": _AccessorAttr(StringMethods),
"cat": _AccessorAttr(_CategoricalAccessor),
"__iter__": custom_iter,
"__init__": _DELETE,
"__new__": Index__new__,
"_constructor": _FastSlowAttribute("_constructor"),
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
},
)
get_final_type_map()[cudf.StringIndex] = Index
get_final_type_map()[cudf.Int8Index] = Index
get_final_type_map()[cudf.Int16Index] = Index
get_final_type_map()[cudf.Int32Index] = Index
get_final_type_map()[cudf.UInt8Index] = Index
get_final_type_map()[cudf.UInt16Index] = Index
get_final_type_map()[cudf.UInt32Index] = Index
get_final_type_map()[cudf.UInt64Index] = Index
get_final_type_map()[cudf.Float32Index] = Index
get_final_type_map()[cudf.GenericIndex] = Index
RangeIndex = make_final_proxy_type(
"RangeIndex",
cudf.RangeIndex,
pd.RangeIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
SparseDtype = make_final_proxy_type(
"SparseDtype",
_Unusable,
pd.SparseDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
SparseArray = make_final_proxy_type(
"SparseDtype",
_Unusable,
pd.arrays.SparseArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
CategoricalIndex = make_final_proxy_type(
"CategoricalIndex",
cudf.CategoricalIndex,
pd.CategoricalIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
Categorical = make_final_proxy_type(
"Categorical",
_Unusable,
pd.Categorical,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
CategoricalDtype = make_final_proxy_type(
"CategoricalDtype",
cudf.CategoricalDtype,
pd.CategoricalDtype,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
DatetimeIndex = make_final_proxy_type(
"DatetimeIndex",
cudf.DatetimeIndex,
pd.DatetimeIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
DatetimeArray = make_final_proxy_type(
"DatetimeArray",
_Unusable,
pd.arrays.DatetimeArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
DatetimeTZDtype = make_final_proxy_type(
"DatetimeTZDtype",
_Unusable,
pd.DatetimeTZDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
TimedeltaIndex = make_final_proxy_type(
"TimedeltaIndex",
cudf.TimedeltaIndex,
pd.TimedeltaIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
TimedeltaArray = make_final_proxy_type(
"TimedeltaArray",
_Unusable,
pd.arrays.TimedeltaArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
PeriodIndex = make_final_proxy_type(
"PeriodIndex",
_Unusable,
pd.PeriodIndex,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
PeriodArray = make_final_proxy_type(
"PeriodArray",
_Unusable,
pd.arrays.PeriodArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
PeriodDtype = make_final_proxy_type(
"PeriodDtype",
_Unusable,
pd.PeriodDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
Period = make_final_proxy_type(
"Period",
_Unusable,
pd.Period,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
MultiIndex = make_final_proxy_type(
"MultiIndex",
cudf.MultiIndex,
pd.MultiIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
TimeGrouper = make_intermediate_proxy_type(
"TimeGrouper",
_Unusable,
pd_TimeGrouper,
)
Grouper = make_final_proxy_type(
"Grouper",
cudf.Grouper,
pd.Grouper,
fast_to_slow=lambda fast: pd.Grouper(
**{
k: getattr(fast, k)
for k in {"key", "level", "freq", "closed", "label"}
if getattr(fast, k) is not None
}
),
slow_to_fast=lambda slow: cudf.Grouper(
**{
k: getattr(slow, k)
for k in {"key", "level", "freq", "closed", "label"}
if getattr(slow, k) is not None
}
),
)
StringArray = make_final_proxy_type(
"StringArray",
_Unusable,
pd.arrays.StringArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
StringDtype = make_final_proxy_type(
"StringDtype",
_Unusable,
pd.StringDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BooleanArray = make_final_proxy_type(
"BooleanArray",
_Unusable,
pd.arrays.BooleanArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__")
},
)
BooleanDtype = make_final_proxy_type(
"BooleanDtype",
_Unusable,
pd.BooleanDtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
IntegerArray = make_final_proxy_type(
"IntegerArray",
_Unusable,
pd.arrays.IntegerArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__")
},
)
Int8Dtype = make_final_proxy_type(
"Int8Dtype",
_Unusable,
pd.Int8Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Int16Dtype = make_final_proxy_type(
"Int16Dtype",
_Unusable,
pd.Int16Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Int32Dtype = make_final_proxy_type(
"Int32Dtype",
_Unusable,
pd.Int32Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Int64Dtype = make_final_proxy_type(
"Int64Dtype",
_Unusable,
pd.Int64Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Int64Index = make_final_proxy_type(
"Int64Index",
cudf.Int64Index,
pd.core.indexes.numeric.Int64Index,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
UInt8Dtype = make_final_proxy_type(
"UInt8Dtype",
_Unusable,
pd.UInt8Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
UInt16Dtype = make_final_proxy_type(
"UInt16Dtype",
_Unusable,
pd.UInt16Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
UInt32Dtype = make_final_proxy_type(
"UInt32Dtype",
_Unusable,
pd.UInt32Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
UInt64Dtype = make_final_proxy_type(
"UInt64Dtype",
_Unusable,
pd.UInt64Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
UInt64Index = make_final_proxy_type(
"UInt64Index",
cudf.UInt64Index,
pd.core.indexes.numeric.UInt64Index,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
IntervalIndex = make_final_proxy_type(
"IntervalIndex",
cudf.IntervalIndex,
pd.IntervalIndex,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
IntervalArray = make_final_proxy_type(
"IntervalArray",
_Unusable,
pd.arrays.IntervalArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
IntervalDtype = make_final_proxy_type(
"IntervalDtype",
cudf.IntervalDtype,
pd.IntervalDtype,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Interval = make_final_proxy_type(
"Interval",
_Unusable,
pd.Interval,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
FloatingArray = make_final_proxy_type(
"FloatingArray",
_Unusable,
pd.arrays.FloatingArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__")
},
)
Float32Dtype = make_final_proxy_type(
"Float32Dtype",
_Unusable,
pd.Float32Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Float64Dtype = make_final_proxy_type(
"Float64Dtype",
_Unusable,
pd.Float64Dtype,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Float64Index = make_final_proxy_type(
"Float64Index",
cudf.Float64Index,
pd.core.indexes.numeric.Float64Index,
fast_to_slow=lambda fast: fast.to_pandas(),
slow_to_fast=cudf.from_pandas,
bases=(Index,),
additional_attributes={"__init__": _DELETE},
)
SeriesGroupBy = make_intermediate_proxy_type(
"SeriesGroupBy",
cudf.core.groupby.groupby.SeriesGroupBy,
pd.core.groupby.SeriesGroupBy,
)
DataFrameGroupBy = make_intermediate_proxy_type(
"DataFrameGroupBy",
cudf.core.groupby.groupby.DataFrameGroupBy,
pd.core.groupby.DataFrameGroupBy,
)
RollingGroupBy = make_intermediate_proxy_type(
"RollingGroupBy",
cudf.core.window.rolling.RollingGroupby,
pd.core.window.rolling.RollingGroupby,
)
_SeriesIlocIndexer = make_intermediate_proxy_type(
"_SeriesIlocIndexer",
cudf.core.series._SeriesIlocIndexer,
pd.core.indexing._iLocIndexer,
)
_DataFrameIlocIndexer = make_intermediate_proxy_type(
"_SeriesIlocIndexer",
cudf.core.dataframe._DataFrameIlocIndexer,
pd.core.indexing._iLocIndexer,
)
_SeriesLocIndexer = make_intermediate_proxy_type(
"_SeriesLocIndexer",
cudf.core.series._SeriesLocIndexer,
pd.core.indexing._LocIndexer,
)
_DataFrameLocIndexer = make_intermediate_proxy_type(
"_DataFrameLocIndexer",
cudf.core.dataframe._DataFrameLocIndexer,
pd.core.indexing._LocIndexer,
)
FixedForwardWindowIndexer = make_final_proxy_type(
"FixedForwardWindowIndexer",
_Unusable,
pd.api.indexers.FixedForwardWindowIndexer,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
VariableOffsetWindowIndexer = make_final_proxy_type(
"VariableOffsetWindowIndexer",
_Unusable,
pd.api.indexers.VariableOffsetWindowIndexer,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
Window = make_intermediate_proxy_type(
"Window",
_Unusable,
pd.core.window.rolling.Window,
)
Rolling = make_intermediate_proxy_type(
"Rolling",
cudf.core.window.Rolling,
pd.core.window.Rolling,
)
ExponentialMovingWindow = make_intermediate_proxy_type(
"ExponentialMovingWindow",
_Unusable,
pd.core.window.ewm.ExponentialMovingWindow,
)
ExponentialMovingWindowGroupby = make_intermediate_proxy_type(
"ExponentialMovingWindowGroupby",
_Unusable,
pd.core.window.ewm.ExponentialMovingWindowGroupby,
)
EWMMeanState = make_intermediate_proxy_type(
"EWMMeanState",
_Unusable,
pd.core.window.online.EWMMeanState,
)
Expanding = make_intermediate_proxy_type(
"Expanding",
_Unusable,
pd.core.window.expanding.Expanding,
)
ExpandingGroupby = make_intermediate_proxy_type(
"ExpandingGroupby",
_Unusable,
pd.core.window.expanding.ExpandingGroupby,
)
Resampler = make_intermediate_proxy_type(
"Resampler", cudf.core.resample._Resampler, pd_Resampler
)
StataReader = make_intermediate_proxy_type(
"StataReader",
_Unusable,
pd.io.stata.StataReader,
)
HDFStore = make_final_proxy_type(
"HDFStore",
_Unusable,
pd.HDFStore,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
ExcelFile = make_final_proxy_type(
"ExcelFile",
_Unusable,
pd.ExcelFile,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
ExcelWriter = make_final_proxy_type(
"ExcelWriter",
_Unusable,
pd.ExcelWriter,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
try:
from pandas.io.formats.style import Styler as pd_Styler # isort: skip
Styler = make_final_proxy_type(
"Styler",
_Unusable,
pd_Styler,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
except ImportError:
# Styler requires Jinja to be installed
pass
_eval_func = _FunctionProxy(_Unusable(), pd.eval)
def _get_eval_locals_and_globals(level, local_dict=None, global_dict=None):
frame = sys._getframe(level + 3)
local_dict = frame.f_locals if local_dict is None else local_dict
global_dict = frame.f_globals if global_dict is None else global_dict
return local_dict, global_dict
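# Illustrative sketch (added for exposition, not part of the library): why
# the caller's frame is captured explicitly. pd.eval resolves bare names
# against the caller's locals/globals, and the proxy layer adds stack
# frames, so the dicts are looked up at a fixed offset instead of relying
# on pandas' own frame walking. The helper is hypothetical and never called.
def _example_eval_uses_caller_locals():  # pragma: no cover
    x = 10  # local name referenced by the expression below  # noqa: F841
    # The wrapped eval defined below forwards the captured dicts, so `x`
    # resolves to this local value.
    return _eval("x + 1")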
@register_proxy_func(pd.eval)
@nvtx.annotate(
"CUDF_PANDAS_EVAL",
color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
domain="cudf_pandas",
)
def _eval(
*args,
parser="pandas",
engine=None,
local_dict=None,
global_dict=None,
**kwargs,
):
# Custom implementation to pre-process the globals and
# locals dictionaries before calling pd.eval.
level = kwargs.get("level", 0)
local_dict, global_dict = _get_eval_locals_and_globals(
level, local_dict, global_dict
)
return _eval_func(
*args,
parser=parser,
engine=engine,
local_dict=local_dict,
global_dict=global_dict,
**kwargs,
)
@nvtx.annotate(
"CUDF_PANDAS_DATAFRAME_EVAL",
color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
domain="cudf_pandas",
)
def _df_eval_method(self, *args, local_dict=None, global_dict=None, **kwargs):
level = kwargs.get("level", 0)
local_dict, global_dict = _get_eval_locals_and_globals(
level, local_dict, global_dict
)
return super(type(self), self).__getattr__("eval")(
*args, local_dict=local_dict, global_dict=global_dict, **kwargs
)
@nvtx.annotate(
"CUDF_PANDAS_DATAFRAME_QUERY",
color=_CUDF_PANDAS_NVTX_COLORS["EXECUTE_SLOW"],
domain="cudf_pandas",
)
def _df_query_method(self, *args, local_dict=None, global_dict=None, **kwargs):
# The `query` API internally calls `eval`, so we reuse the same
# helper as `eval` to populate the locals and globals dicts.
level = kwargs.get("level", 0)
local_dict, global_dict = _get_eval_locals_and_globals(
level, local_dict, global_dict
)
return super(type(self), self).__getattr__("query")(
*args, local_dict=local_dict, global_dict=global_dict, **kwargs
)
DataFrame.eval = _df_eval_method # type: ignore
DataFrame.query = _df_query_method # type: ignore
_JsonReader = make_intermediate_proxy_type(
"_JsonReader",
_Unusable,
pd.io.json._json.JsonReader,
)
_TextFileReader = make_intermediate_proxy_type(
"_TextFileReader", _Unusable, pd.io.parsers.readers.TextFileReader
)
_XportReader = make_intermediate_proxy_type(
"_XportReader", _Unusable, pd_XportReader
)
_SAS7BDATReader = make_intermediate_proxy_type(
"_SAS7BDATReader", _Unusable, pd_SAS7BDATReader
)
FY5253 = make_final_proxy_type(
"FY5253",
_Unusable,
pd.offsets.FY5253,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BDay = make_final_proxy_type(
"BDay",
_Unusable,
pd.offsets.BDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BMonthBegin = make_final_proxy_type(
"BMonthBegin",
_Unusable,
pd.offsets.BMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BMonthEnd = make_final_proxy_type(
"BMonthEnd",
_Unusable,
pd.offsets.BMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BQuarterBegin = make_final_proxy_type(
"BQuarterBegin",
_Unusable,
pd.offsets.BQuarterBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BQuarterEnd = make_final_proxy_type(
"BQuarterEnd",
_Unusable,
pd.offsets.BQuarterEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BusinessDay = make_final_proxy_type(
"BusinessDay",
_Unusable,
pd.offsets.BusinessDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BusinessHour = make_final_proxy_type(
"BusinessHour",
_Unusable,
pd.offsets.BusinessHour,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BusinessMonthBegin = make_final_proxy_type(
"BusinessMonthBegin",
_Unusable,
pd.offsets.BusinessMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BusinessMonthEnd = make_final_proxy_type(
"BusinessMonthEnd",
_Unusable,
pd.offsets.BusinessMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BYearBegin = make_final_proxy_type(
"BYearBegin",
_Unusable,
pd.offsets.BYearBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
BYearEnd = make_final_proxy_type(
"BYearEnd",
_Unusable,
pd.offsets.BYearEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
CBMonthBegin = make_final_proxy_type(
"CBMonthBegin",
_Unusable,
pd.offsets.CBMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
CBMonthEnd = make_final_proxy_type(
"CBMonthEnd",
_Unusable,
pd.offsets.CBMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
CDay = make_final_proxy_type(
"CDay",
_Unusable,
pd.offsets.CDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
CustomBusinessDay = make_final_proxy_type(
"CustomBusinessDay",
_Unusable,
pd.offsets.CustomBusinessDay,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
CustomBusinessHour = make_final_proxy_type(
"CustomBusinessHour",
_Unusable,
pd.offsets.CustomBusinessHour,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
CustomBusinessMonthBegin = make_final_proxy_type(
"CustomBusinessMonthBegin",
_Unusable,
pd.offsets.CustomBusinessMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
CustomBusinessMonthEnd = make_final_proxy_type(
"CustomBusinessMonthEnd",
_Unusable,
pd.offsets.CustomBusinessMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
DateOffset = make_final_proxy_type(
"DateOffset",
_Unusable,
pd.offsets.DateOffset,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Day = make_final_proxy_type(
"Day",
_Unusable,
pd.offsets.Day,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Easter = make_final_proxy_type(
"Easter",
_Unusable,
pd.offsets.Easter,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
FY5253Quarter = make_final_proxy_type(
"FY5253Quarter",
_Unusable,
pd.offsets.FY5253Quarter,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Hour = make_final_proxy_type(
"Hour",
_Unusable,
pd.offsets.Hour,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
LastWeekOfMonth = make_final_proxy_type(
"LastWeekOfMonth",
_Unusable,
pd.offsets.LastWeekOfMonth,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Micro = make_final_proxy_type(
"Micro",
_Unusable,
pd.offsets.Micro,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Milli = make_final_proxy_type(
"Milli",
_Unusable,
pd.offsets.Milli,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Minute = make_final_proxy_type(
"Minute",
_Unusable,
pd.offsets.Minute,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
MonthBegin = make_final_proxy_type(
"MonthBegin",
_Unusable,
pd.offsets.MonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
MonthEnd = make_final_proxy_type(
"MonthEnd",
_Unusable,
pd.offsets.MonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Nano = make_final_proxy_type(
"Nano",
_Unusable,
pd.offsets.Nano,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
QuarterBegin = make_final_proxy_type(
"QuarterBegin",
_Unusable,
pd.offsets.QuarterBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
QuarterEnd = make_final_proxy_type(
"QuarterEnd",
_Unusable,
pd.offsets.QuarterEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Second = make_final_proxy_type(
"Second",
_Unusable,
pd.offsets.Second,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
SemiMonthBegin = make_final_proxy_type(
"SemiMonthBegin",
_Unusable,
pd.offsets.SemiMonthBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
SemiMonthEnd = make_final_proxy_type(
"SemiMonthEnd",
_Unusable,
pd.offsets.SemiMonthEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Tick = make_final_proxy_type(
"Tick",
_Unusable,
pd.offsets.Tick,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Week = make_final_proxy_type(
"Week",
_Unusable,
pd.offsets.Week,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
WeekOfMonth = make_final_proxy_type(
"WeekOfMonth",
_Unusable,
pd.offsets.WeekOfMonth,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
YearBegin = make_final_proxy_type(
"YearBegin",
_Unusable,
pd.offsets.YearBegin,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
YearEnd = make_final_proxy_type(
"YearEnd",
_Unusable,
pd.offsets.YearEnd,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
Flags = make_final_proxy_type(
"Flags",
_Unusable,
pd.Flags,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
NamedAgg = make_final_proxy_type(
"NamedAgg",
_Unusable,
pd.NamedAgg,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={"__hash__": _FastSlowAttribute("__hash__")},
)
ArrowExtensionArray = make_final_proxy_type(
"ExtensionArray",
_Unusable,
pd.arrays.ArrowExtensionArray,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
)
# The following are subclasses of `pandas.core.base.PandasObject`,
# excluding subclasses defined in `pandas.core.internals`. These are
# not strictly part of the Pandas public API, but they do appear as
# return types.
_PANDAS_OBJ_FINAL_TYPES = [
pd.core.arrays.sparse.array.SparseArray,
pd.core.indexes.frozen.FrozenList,
pd.core.indexes.category.CategoricalIndex,
pd.core.indexes.datetimelike.DatetimeTimedeltaMixin,
pd.core.indexes.datetimelike.DatetimeIndexOpsMixin,
pd.core.indexes.extension.NDArrayBackedExtensionIndex,
pd.core.indexes.numeric.IntegerIndex,
pd.core.indexes.numeric.NumericIndex,
pd.core.generic.NDFrame,
pd.core.indexes.accessors.PeriodProperties,
pd.core.indexes.accessors.Properties,
pd.plotting._core.PlotAccessor,
pd.io.sql.SQLiteTable,
pd.io.sql.SQLTable,
pd.io.sql.SQLDatabase,
pd.io.sql.SQLiteDatabase,
pd.io.sql.PandasSQL,
]
_PANDAS_OBJ_INTERMEDIATE_TYPES = [
pd.core.groupby.groupby.GroupByPlot,
pd.core.groupby.groupby.GroupBy,
pd.core.groupby.groupby.BaseGroupBy,
]
for typ in _PANDAS_OBJ_FINAL_TYPES:
if typ.__name__ in globals():
# if we already defined a proxy type
# corresponding to this type, use that.
continue
globals()[typ.__name__] = make_final_proxy_type(
typ.__name__,
_Unusable,
typ,
fast_to_slow=_Unusable(),
slow_to_fast=_Unusable(),
additional_attributes={
"__array__": array_method,
"__array_function__": array_function_method,
"__array_ufunc__": _FastSlowAttribute("__array_ufunc__"),
"__hash__": _FastSlowAttribute("__hash__"),
},
)
for typ in _PANDAS_OBJ_INTERMEDIATE_TYPES:
if typ.__name__ in globals():
# if we already defined a proxy type
# corresponding to this type, use that.
continue
globals()[typ.__name__] = make_intermediate_proxy_type(
typ.__name__,
_Unusable,
typ,
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/pandas
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/scripts/run-pandas-tests.sh
|
#!/usr/bin/env bash
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Run Pandas unit tests with cudf.pandas.
#
# Usage:
# run-pandas-tests.sh <pytest args> <path to pandas tests (optional)>
#
# Examples
# Run a single test
# run-pandas-tests.sh -n auto -v tests/groupby/test_groupby_dropna.py
# Run all tests
# run-pandas-tests.sh --tb=line --report-log=log.json
#
# This script creates a `pandas-testing` directory if it doesn't exist
# Grab the Pandas source corresponding to the version
# of Pandas installed.
PANDAS_VERSION=$(python -c "import pandas; print(pandas.__version__)")
PYTEST_IGNORES="--ignore=tests/io/test_user_agent.py"
mkdir -p pandas-testing
cd pandas-testing
if [ ! -d "pandas" ]; then
git clone https://github.com/pandas-dev/pandas
fi
cd pandas && git clean -fdx && git checkout v$PANDAS_VERSION && cd ../
if [ ! -d "pandas-tests" ]; then
# Copy just the tests out of the Pandas source tree.
# Not exactly sure why this is needed but Pandas
# imports fail if we don't do this:
mkdir -p pandas-tests
cp -r pandas/pandas/tests pandas-tests/
# directory layout requirement
# conftest.py
# pyproject.toml
# tests/
cp pandas/pandas/conftest.py pandas-tests/conftest.py
# Vendored from pandas/pyproject.toml
cat > pandas-tests/pyproject.toml << \EOF
[tool.pytest.ini_options]
xfail_strict = true
filterwarnings = [
"error:Sparse:FutureWarning",
"error:The SparseArray:FutureWarning",
# Deprecation gives warning on import during pytest collection
"ignore:pandas.core.index is deprecated:FutureWarning:importlib",
"ignore:pandas.util.testing is deprecated:FutureWarning:importlib",
# Will be fixed in numba 0.56: https://github.com/numba/numba/issues/7758
"ignore:`np.MachAr` is deprecated:DeprecationWarning:numba",
]
markers = [
"single_cpu: tests that should run on a single cpu only",
"slow: mark a test as slow",
"network: mark a test as network",
"db: tests requiring a database (mysql or postgres)",
"clipboard: mark a pd.read_clipboard test",
"arm_slow: mark a test as slow for arm64 architecture",
"arraymanager: mark a test to run with ArrayManager enabled",
]
EOF
# append the contents of conftest-patch.py to conftest.py
cat ../python/cudf/cudf/pandas/scripts/conftest-patch.py >> pandas-tests/conftest.py
# Substitute `pandas.tests` with a relative import.
# This will depend on the location of the test module relative to
# the pandas-tests directory.
for hit in $(find . -iname '*.py' | xargs grep "pandas.tests" | cut -d ":" -f 1 | sort | uniq); do
# Get the relative path to the test module
test_module=$(echo $hit | cut -d "/" -f 2-)
# Get the number of directories to go up
num_dirs=$(echo $test_module | grep -o "/" | wc -l)
num_dots=$(($num_dirs - 2))
# Construct the relative import
relative_import=$(printf "%0.s." $(seq 1 $num_dots))
# Replace the import
sed -i "s/pandas.tests/${relative_import}/g" $hit
done
fi
# append the contents of conftest-patch.py to conftest.py
cat ../python/cudf/cudf/pandas/scripts/conftest-patch.py >> pandas-tests/conftest.py
# Run the tests
cd pandas-tests/
# TODO: Get a postgres & mysql container set up on the CI
# test_overwrite_warns unsafely patches over Series.mean, affecting other tests when run in parallel
# test_complex_series_frame_alignment randomly selects a DataFrame and axis to test, but particular random selection(s) always fail
# test_numpy_ufuncs_basic compares floating point values to unbounded precision, sometimes leading to failures
TEST_NUMPY_UFUNCS_BASIC_FLAKY="test_numpy_ufuncs_basic[float-exp] \
and not test_numpy_ufuncs_basic[float-exp2] \
and not test_numpy_ufuncs_basic[float-expm1] \
and not test_numpy_ufuncs_basic[float-log] \
and not test_numpy_ufuncs_basic[float-log2] \
and not test_numpy_ufuncs_basic[float-log10] \
and not test_numpy_ufuncs_basic[float-log1p] \
and not test_numpy_ufuncs_basic[float-sqrt] \
and not test_numpy_ufuncs_basic[float-sin] \
and not test_numpy_ufuncs_basic[float-cos] \
and not test_numpy_ufuncs_basic[float-tan] \
and not test_numpy_ufuncs_basic[float-arcsin] \
and not test_numpy_ufuncs_basic[float-arccos] \
and not test_numpy_ufuncs_basic[float-arctan] \
and not test_numpy_ufuncs_basic[float-sinh] \
and not test_numpy_ufuncs_basic[float-cosh] \
and not test_numpy_ufuncs_basic[float-tanh] \
and not test_numpy_ufuncs_basic[float-arcsinh] \
and not test_numpy_ufuncs_basic[float-arccosh] \
and not test_numpy_ufuncs_basic[float-arctanh] \
and not test_numpy_ufuncs_basic[float-deg2rad] \
and not test_numpy_ufuncs_basic[float-rad2deg] \
and not test_numpy_ufuncs_basic[num_float64-exp] \
and not test_numpy_ufuncs_basic[num_float64-exp2] \
and not test_numpy_ufuncs_basic[num_float64-expm1] \
and not test_numpy_ufuncs_basic[num_float64-log] \
and not test_numpy_ufuncs_basic[num_float64-log2] \
and not test_numpy_ufuncs_basic[num_float64-log10] \
and not test_numpy_ufuncs_basic[num_float64-log1p] \
and not test_numpy_ufuncs_basic[num_float64-sqrt] \
and not test_numpy_ufuncs_basic[num_float64-sin] \
and not test_numpy_ufuncs_basic[num_float64-cos] \
and not test_numpy_ufuncs_basic[num_float64-tan] \
and not test_numpy_ufuncs_basic[num_float64-arcsin] \
and not test_numpy_ufuncs_basic[num_float64-arccos] \
and not test_numpy_ufuncs_basic[num_float64-arctan] \
and not test_numpy_ufuncs_basic[num_float64-sinh] \
and not test_numpy_ufuncs_basic[num_float64-cosh] \
and not test_numpy_ufuncs_basic[num_float64-tanh] \
and not test_numpy_ufuncs_basic[num_float64-arcsinh] \
and not test_numpy_ufuncs_basic[num_float64-arccosh] \
and not test_numpy_ufuncs_basic[num_float64-arctanh] \
and not test_numpy_ufuncs_basic[num_float64-deg2rad] \
and not test_numpy_ufuncs_basic[num_float64-rad2deg] \
and not test_numpy_ufuncs_basic[num_float32-exp] \
and not test_numpy_ufuncs_basic[num_float32-exp2] \
and not test_numpy_ufuncs_basic[num_float32-expm1] \
and not test_numpy_ufuncs_basic[num_float32-log] \
and not test_numpy_ufuncs_basic[num_float32-log2] \
and not test_numpy_ufuncs_basic[num_float32-log10] \
and not test_numpy_ufuncs_basic[num_float32-log1p] \
and not test_numpy_ufuncs_basic[num_float32-sqrt] \
and not test_numpy_ufuncs_basic[num_float32-sin] \
and not test_numpy_ufuncs_basic[num_float32-cos] \
and not test_numpy_ufuncs_basic[num_float32-tan] \
and not test_numpy_ufuncs_basic[num_float32-arcsin] \
and not test_numpy_ufuncs_basic[num_float32-arccos] \
and not test_numpy_ufuncs_basic[num_float32-arctan] \
and not test_numpy_ufuncs_basic[num_float32-sinh] \
and not test_numpy_ufuncs_basic[num_float32-cosh] \
and not test_numpy_ufuncs_basic[num_float32-tanh] \
and not test_numpy_ufuncs_basic[num_float32-arcsinh] \
and not test_numpy_ufuncs_basic[num_float32-arccosh] \
and not test_numpy_ufuncs_basic[num_float32-arctanh] \
and not test_numpy_ufuncs_basic[num_float32-deg2rad] \
and not test_numpy_ufuncs_basic[num_float32-rad2deg] \
and not test_numpy_ufuncs_basic[nullable_float-exp] \
and not test_numpy_ufuncs_basic[nullable_float-exp2] \
and not test_numpy_ufuncs_basic[nullable_float-expm1] \
and not test_numpy_ufuncs_basic[nullable_float-log] \
and not test_numpy_ufuncs_basic[nullable_float-log2] \
and not test_numpy_ufuncs_basic[nullable_float-log10] \
and not test_numpy_ufuncs_basic[nullable_float-log1p] \
and not test_numpy_ufuncs_basic[nullable_float-sqrt] \
and not test_numpy_ufuncs_basic[nullable_float-sin] \
and not test_numpy_ufuncs_basic[nullable_float-cos] \
and not test_numpy_ufuncs_basic[nullable_float-tan] \
and not test_numpy_ufuncs_basic[nullable_float-arcsin] \
and not test_numpy_ufuncs_basic[nullable_float-arccos] \
and not test_numpy_ufuncs_basic[nullable_float-arctan] \
and not test_numpy_ufuncs_basic[nullable_float-sinh] \
and not test_numpy_ufuncs_basic[nullable_float-cosh] \
and not test_numpy_ufuncs_basic[nullable_float-tanh] \
and not test_numpy_ufuncs_basic[nullable_float-arcsinh] \
and not test_numpy_ufuncs_basic[nullable_float-arccosh] \
and not test_numpy_ufuncs_basic[nullable_float-arctanh] \
and not test_numpy_ufuncs_basic[nullable_float-deg2rad] \
and not test_numpy_ufuncs_basic[nullable_float-rad2deg]"
PANDAS_CI="1" python -m pytest -p cudf.pandas \
-m "not single_cpu and not db" \
-k "not test_overwrite_warns and not test_complex_series_frame_alignment and not $TEST_NUMPY_UFUNCS_BASIC_FLAKY" \
--durations=50 \
--import-mode=importlib \
-o xfail_strict=True \
${PYTEST_IGNORES} $@
mv *.json ..
cd ..
rm -rf pandas-testing/pandas-tests/
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/pandas
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/scripts/conftest-patch.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
import contextlib
import os
import sys
from functools import wraps
# `tm` is referenced by patch_testing_functions below; import it here so the
# snippet is self-contained when read on its own.
import pandas._testing as tm
import pytest
def replace_kwargs(new_kwargs):
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
kwargs.update(new_kwargs)
return func(*args, **kwargs)
return wrapped
return wrapper
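# Illustrative sketch (added for exposition, not part of this patch):
# replace_kwargs forces the chosen keyword arguments regardless of what the
# caller passed, which is how pytest.raises is relaxed below. The helper is
# hypothetical and never called.
def _example_replace_kwargs():  # pragma: no cover
    @replace_kwargs({"match": None})
    def fake_raises(*args, **kwargs):
        return kwargs

    # The caller asks for match="boom", but the wrapper overrides it.
    assert fake_raises(ValueError, match="boom") == {"match": None}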
@contextlib.contextmanager
def null_assert_warnings(*args, **kwargs):
try:
yield []
finally:
pass
@pytest.fixture(scope="session", autouse=True) # type: ignore
def patch_testing_functions():
tm.assert_produces_warning = null_assert_warnings
pytest.raises = replace_kwargs({"match": None})(pytest.raises)
sys.path.append(os.path.dirname(__file__))
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/pandas
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/scripts/summarize-test-results.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Summarizes the test results per module.
Examples:
python summarize-test-results.py log.json
python summarize-test-results.py log.json --output json
python summarize-test-results.py log.json --output table
"""
import argparse
import json
from rich.console import Console
from rich.table import Table
PANDAS_TEST_PREFIX = "pandas-tests/"
def get_per_module_results(log_file_name):
per_module_results = {}
with open(log_file_name) as f:
for line in f:
try:
line = json.loads(line)
except Exception:
line = {}
if "outcome" in line:
outcome = line["outcome"]
# outcome can be "passed", "failed", or "skipped".
# Depending on other fields, it can indicate
# an errored, xpassed, or xfailed test.
if line.get("when", None) != "call":
# when != call indicates test setup or teardown
if outcome == "failed":
# if the test failed during setup or teardown,
# it counts as an "errored" test:
outcome = "errored"
else:
# we don't care about other outcomes during
# setup or teardown
continue
else:
if line.get("wasxfail", False) and outcome == "passed":
# it's an xpassed test
outcome = "failed"
module_name = (
line["nodeid"]
.split("::")[0]
.removeprefix(PANDAS_TEST_PREFIX)
)
per_module_results.setdefault(module_name, {})
per_module_results[module_name].setdefault("total", 0)
per_module_results[module_name].setdefault(outcome, 0)
per_module_results[module_name]["total"] += 1
per_module_results[module_name][outcome] += 1
return per_module_results
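# Illustrative sketch (added for exposition, not part of the script): one
# hypothetical pytest report-log line and the aggregate it contributes to.
def _example_per_module_aggregation():  # pragma: no cover
    line = {
        "outcome": "passed",
        "when": "call",
        "nodeid": "pandas-tests/tests/frame/test_api.py::test_repr",
    }
    # get_per_module_results would fold this line into:
    #   {"tests/frame/test_api.py": {"total": 1, "passed": 1}}
    return line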
def sort_results(results):
sorted_keys = sorted(
results, key=lambda key: results[key].get("failed", 0)
)
return {key: results[key] for key in sorted_keys}
def print_results_as_json(results):
print(json.dumps(results, indent=4))
def print_results_as_table(results):
table = Table()
table.add_column("Test module")
table.add_column("Total tests")
table.add_column("Passed tests")
table.add_column("Failed tests")
table.add_column("Errored tests")
table.add_column("Skipped tests")
totals = {"total": 0, "passed": 0, "failed": 0, "errored": 0, "skipped": 0}
for module_name, row in results.items():
values = []
for key in ("total", "passed", "failed", "errored", "skipped"):
totals[key] += row.get(key, 0)
values.append(row.get(key, 0))
table.add_row(module_name, *map(str, values))
table.add_section()
table.add_row(
"total={}, passed={}, failed={}, errored={}, skipped={}".format(
*map(str, totals.values())
)
)
console = Console()
console.print(table)
if __name__ == "__main__":
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"log_file_name", nargs=1, help="The input log file name"
)
parser.add_argument(
"--output",
choices=["json", "table"],
default="table",
help="The output format",
)
args = parser.parse_args()
results = sort_results(get_per_module_results(args.log_file_name[0]))
if args.output == "json":
print_results_as_json(results)
else:
print_results_as_table(results)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf/pandas
|
rapidsai_public_repos/cudf/python/cudf/cudf/pandas/scripts/analyze-test-failures.py
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES.
# All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Prints the most common test failures for the given tests.
Usage:
python analyze-test-failures.py <path-to-test-log> <file-or-pattern>
Example:
python analyze-test-failures.py log.json frame/*
"""
import json
import sys
from collections import Counter
from fnmatch import fnmatch
from rich.console import Console
from rich.table import Table
PANDAS_TEST_PREFIX = "pandas-tests/"
def count_failures(log_file_name, pattern):
counter = Counter()
with open(log_file_name) as f:
for line in f:
try:
line = json.loads(line)
except Exception:
continue
if (
"location" in line
and line["when"] == "call"
and line["outcome"] == "failed"
):
line_module_name = line["location"][0].removeprefix(
PANDAS_TEST_PREFIX
)
if fnmatch(line_module_name, pattern):
if "longrepr" in line and line["longrepr"]:
if isinstance(line["longrepr"], (tuple, list)):
message = line["longrepr"][2].splitlines()[0]
elif isinstance(line["longrepr"], str):
message = line["longrepr"]
else:
message = line["longrepr"]["reprcrash"][
"message"
].splitlines()[0]
counter[message] += 1
return counter
def render_results(results, num_rows=20):
table = Table()
table.add_column("Failure message")
table.add_column("Number of occurences")
for msg, num in results.most_common(20):
table.add_row(msg, str(num))
console = Console()
console.print(table)
if __name__ == "__main__":
log_file_name = sys.argv[1]
pattern = sys.argv[2]
render_results(count_failures(log_file_name, pattern), num_rows=20)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/testing/_utils.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
import itertools
import string
import warnings
from collections import abc
from contextlib import contextmanager
from decimal import Decimal
import cupy
import numpy as np
import pandas as pd
import pytest
from numba.core.typing import signature as nb_signature
from numba.core.typing.templates import AbstractTemplate
from numba.cuda.cudadecl import registry as cuda_decl_registry
from numba.cuda.cudaimpl import lower as cuda_lower
from pandas import testing as tm
import cudf
from cudf._lib.null_mask import bitmask_allocation_size_bytes
from cudf.api.types import is_scalar
from cudf.core.column.timedelta import _unit_to_nanoseconds_conversion
from cudf.core.udf.strings_lowering import cast_string_view_to_udf_string
from cudf.core.udf.strings_typing import StringView, string_view, udf_string
from cudf.utils import dtypes as dtypeutils
supported_numpy_dtypes = [
"bool",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"datetime64[ms]",
"datetime64[us]",
]
SIGNED_INTEGER_TYPES = sorted(list(dtypeutils.SIGNED_INTEGER_TYPES))
UNSIGNED_TYPES = sorted(list(dtypeutils.UNSIGNED_TYPES))
INTEGER_TYPES = sorted(list(dtypeutils.INTEGER_TYPES))
FLOAT_TYPES = sorted(list(dtypeutils.FLOAT_TYPES))
SIGNED_TYPES = sorted(list(dtypeutils.SIGNED_TYPES))
NUMERIC_TYPES = sorted(list(dtypeutils.NUMERIC_TYPES))
DATETIME_TYPES = sorted(list(dtypeutils.DATETIME_TYPES))
TIMEDELTA_TYPES = sorted(list(dtypeutils.TIMEDELTA_TYPES))
OTHER_TYPES = sorted(list(dtypeutils.OTHER_TYPES))
ALL_TYPES = sorted(list(dtypeutils.ALL_TYPES))
SERIES_OR_INDEX_NAMES = [
None,
pd.NA,
cudf.NA,
np.nan,
float("NaN"),
"abc",
1,
pd.NaT,
np.datetime64("nat"),
np.timedelta64("NaT"),
np.timedelta64(10, "D"),
np.timedelta64(5, "D"),
np.datetime64("1970-01-01 00:00:00.000000001"),
np.datetime64("1970-01-01 00:00:00.000000002"),
pd.Timestamp(1),
pd.Timestamp(2),
pd.Timedelta(1),
pd.Timedelta(2),
Decimal("NaN"),
Decimal("1.2"),
np.int64(1),
np.int32(1),
np.float32(1),
pd.Timestamp(1),
]
def set_random_null_mask_inplace(series, null_probability=0.5, seed=None):
"""Randomly nullify elements in series with the provided probability."""
probs = [null_probability, 1 - null_probability]
rng = np.random.default_rng(seed=seed)
mask = rng.choice([False, True], size=len(series), p=probs)
series.iloc[mask] = None
# TODO: This function should be removed. Anywhere that it is being used should
# instead be generating a random boolean array (bytemask) and use the public
# APIs to set those elements to None.
def random_bitmask(size):
"""
Parameters
----------
size : int
number of bits
"""
sz = bitmask_allocation_size_bytes(size)
data = np.random.randint(0, 255, dtype="u1", size=sz)
return data.view("i1")
def expand_bits_to_bytes(arr):
def fix_binary(bstr):
bstr = bstr[2:]
diff = 8 - len(bstr)
return ("0" * diff + bstr)[::-1]
ba = bytearray(arr.data)
return list(map(int, "".join(map(fix_binary, map(bin, ba)))))
def count_zero(arr):
arr = np.asarray(arr)
return np.count_nonzero(arr == 0)
def assert_eq(left, right, **kwargs):
"""Assert that two cudf-like things are equivalent
This equality test works for pandas/cudf dataframes/series/indexes/scalars
in the same way, and so makes it easier to perform parametrized testing
without switching between assert_frame_equal/assert_series_equal/...
functions.
"""
# dtypes that we support but Pandas doesn't will convert to
# `object`. Check equality before that happens:
if kwargs.get("check_dtype", True):
if hasattr(left, "dtype") and hasattr(right, "dtype"):
if isinstance(
left.dtype, cudf.core.dtypes._BaseDtype
) and not isinstance(
left.dtype, cudf.CategoricalDtype
): # leave categorical comparison to Pandas
assert_eq(left.dtype, right.dtype)
if hasattr(left, "to_pandas"):
left = left.to_pandas()
if hasattr(right, "to_pandas"):
right = right.to_pandas()
if isinstance(left, cupy.ndarray):
left = cupy.asnumpy(left)
if isinstance(right, cupy.ndarray):
right = cupy.asnumpy(right)
if isinstance(left, (pd.DataFrame, pd.Series, pd.Index)):
# TODO: A warning is emitted from the function
# pandas.testing.assert_[series, frame, index]_equal for some inputs:
# "DeprecationWarning: elementwise comparison failed; this will raise
# an error in the future."
# or "FutureWarning: elementwise ..."
# This warning comes from a call from pandas to numpy. It is ignored
# here because it cannot be fixed within cudf.
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", (DeprecationWarning, FutureWarning)
)
if isinstance(left, pd.DataFrame):
tm.assert_frame_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
tm.assert_series_equal(left, right, **kwargs)
else:
tm.assert_index_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
if np.issubdtype(left.dtype, np.floating) and np.issubdtype(
right.dtype, np.floating
):
assert np.allclose(left, right, equal_nan=True)
else:
assert np.array_equal(left, right)
else:
# Use the overloaded __eq__ of the operands
if left == right:
return True
elif any(np.issubdtype(type(x), np.floating) for x in (left, right)):
np.testing.assert_almost_equal(left, right)
else:
np.testing.assert_equal(left, right)
return True
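# Illustrative usage (not part of the original module): ``assert_eq`` accepts any
# mix of cudf/pandas objects, cupy/numpy arrays, or scalars, e.g.:
#
#     import pandas as pd
#     import cudf
#     from cudf.testing._utils import assert_eq
#     assert_eq(cudf.Series([1, 2, 3]), pd.Series([1, 2, 3]))
#     assert_eq(cudf.DataFrame({"a": [1.0, 2.0]}), pd.DataFrame({"a": [1.0, 2.0]}))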
def assert_neq(left, right, **kwargs):
__tracebackhide__ = True
try:
assert_eq(left, right, **kwargs)
except AssertionError:
pass
else:
raise AssertionError
def assert_exceptions_equal(
lfunc,
rfunc,
lfunc_args_and_kwargs=None,
rfunc_args_and_kwargs=None,
check_exception_type=True,
):
"""Compares if two functions ``lfunc`` and ``rfunc`` raise
same exception or not.
Parameters
----------
lfunc : callable
A callable function to obtain the Exception.
rfunc : callable
A callable function to compare the Exception
obtained by calling ``rfunc``.
lfunc_args_and_kwargs : tuple, default None
Tuple containing positional arguments at first position,
and key-word arguments at second position that need to be passed into
``lfunc``. If the tuple is of length 1, it must either contain
positional arguments(as a Sequence) or key-word arguments(as a Mapping
dict).
rfunc_args_and_kwargs : tuple, default None
Tuple containing positional arguments at first position,
and key-word arguments at second position that need to be passed into
``rfunc``. If the tuple is of length 1, it must either contain
positional arguments(as a Sequence) or key-word arguments(as a Mapping
dict).
check_exception_type : boolean, default True
Whether to compare the exception types raised by ``lfunc``
with ``rfunc`` exception type or not. If False, ``rfunc``
is simply evaluated against `Exception` type.
Returns
-------
None
If exceptions raised by ``lfunc`` and
``rfunc`` match.
Raises
------
AssertionError
If call to ``lfunc`` doesn't raise any Exception.
"""
    lfunc_args, lfunc_kwargs = _get_args_kwargs_for_assert_exceptions(
lfunc_args_and_kwargs
)
    rfunc_args, rfunc_kwargs = _get_args_kwargs_for_assert_exceptions(
rfunc_args_and_kwargs
)
try:
lfunc(*lfunc_args, **lfunc_kwargs)
except KeyboardInterrupt:
raise
except Exception as e:
with pytest.raises(type(e) if check_exception_type else Exception):
rfunc(*rfunc_args, **rfunc_kwargs)
else:
raise AssertionError("Expected to fail with an Exception.")
def _get_args_kwargs_for_assert_exceptions(func_args_and_kwargs):
if func_args_and_kwargs is None:
return [], {}
else:
if len(func_args_and_kwargs) == 1:
func_args, func_kwargs = [], {}
if isinstance(func_args_and_kwargs[0], abc.Sequence):
func_args = func_args_and_kwargs[0]
elif isinstance(func_args_and_kwargs[0], abc.Mapping):
func_kwargs = func_args_and_kwargs[0]
else:
raise ValueError(
"length 1 func_args_and_kwargs must be "
"either a Sequence or a Mapping"
)
elif len(func_args_and_kwargs) == 2:
if not isinstance(func_args_and_kwargs[0], abc.Sequence):
raise ValueError(
"Positional argument at 1st position of "
"func_args_and_kwargs should be a sequence."
)
if not isinstance(func_args_and_kwargs[1], abc.Mapping):
raise ValueError(
"Key-word argument at 2nd position of "
"func_args_and_kwargs should be a dictionary mapping."
)
func_args, func_kwargs = func_args_and_kwargs
else:
raise ValueError("func_args_and_kwargs must be of length 1 or 2")
return func_args, func_kwargs
def gen_rand(dtype, size, **kwargs):
dtype = cudf.dtype(dtype)
if dtype.kind == "f":
res = np.random.random(size=size).astype(dtype)
if kwargs.get("positive_only", False):
return res
else:
return res * 2 - 1
elif dtype == np.int8 or dtype == np.int16:
low = kwargs.get("low", -32)
high = kwargs.get("high", 32)
return np.random.randint(low=low, high=high, size=size).astype(dtype)
elif dtype.kind == "i":
low = kwargs.get("low", -10000)
high = kwargs.get("high", 10000)
return np.random.randint(low=low, high=high, size=size).astype(dtype)
elif dtype == np.uint8 or dtype == np.uint16:
low = kwargs.get("low", 0)
high = kwargs.get("high", 32)
return np.random.randint(low=low, high=high, size=size).astype(dtype)
elif dtype.kind == "u":
low = kwargs.get("low", 0)
high = kwargs.get("high", 128)
return np.random.randint(low=low, high=high, size=size).astype(dtype)
elif dtype.kind == "b":
low = kwargs.get("low", 0)
high = kwargs.get("high", 2)
return np.random.randint(low=low, high=high, size=size).astype(
np.bool_
)
elif dtype.kind == "M":
low = kwargs.get("low", 0)
time_unit, _ = np.datetime_data(dtype)
high = kwargs.get(
"high",
int(1e18) / _unit_to_nanoseconds_conversion[time_unit],
)
return pd.to_datetime(
np.random.randint(low=low, high=high, size=size), unit=time_unit
)
elif dtype.kind in ("O", "U"):
low = kwargs.get("low", 10)
high = kwargs.get("high", 11)
nchars = np.random.randint(low=low, high=high, size=1)[0]
char_options = np.array(list(string.ascii_letters + string.digits))
all_chars = "".join(np.random.choice(char_options, nchars * size))
return np.array(
[all_chars[nchars * i : nchars * (i + 1)] for i in range(size)]
)
raise NotImplementedError(f"dtype.kind={dtype.kind}")
def gen_rand_series(dtype, size, **kwargs):
values = gen_rand(dtype, size, **kwargs)
if kwargs.get("has_nulls", False):
return cudf.Series.from_masked_array(values, random_bitmask(size))
return cudf.Series(values)
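# Illustrative usage (not part of the original module):
#
#     vals = gen_rand("float64", 100, positive_only=True)  # numpy array in [0, 1)
#     sr = gen_rand_series("int32", 50, has_nulls=True)     # cudf.Series with random nulls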
def _decimal_series(input, dtype):
return cudf.Series(
[x if x is None else Decimal(x) for x in input],
dtype=dtype,
)
@contextmanager
def does_not_raise():
yield
def assert_column_memory_eq(
lhs: cudf.core.column.ColumnBase, rhs: cudf.core.column.ColumnBase
):
"""Assert the memory location and size of `lhs` and `rhs` are equivalent.
Both data pointer and mask pointer are checked. Also recursively check for
children to the same constraints. Also fails check if the number of
children mismatches at any level.
"""
def get_ptr(x) -> int:
return x.get_ptr(mode="read") if x else 0
assert get_ptr(lhs.base_data) == get_ptr(rhs.base_data)
assert get_ptr(lhs.base_mask) == get_ptr(rhs.base_mask)
assert lhs.base_size == rhs.base_size
assert lhs.offset == rhs.offset
assert lhs.size == rhs.size
assert len(lhs.base_children) == len(rhs.base_children)
for lhs_child, rhs_child in zip(lhs.base_children, rhs.base_children):
assert_column_memory_eq(lhs_child, rhs_child)
if isinstance(lhs, cudf.core.column.CategoricalColumn) and isinstance(
rhs, cudf.core.column.CategoricalColumn
):
assert_column_memory_eq(lhs.categories, rhs.categories)
assert_column_memory_eq(lhs.codes, rhs.codes)
def assert_column_memory_ne(
lhs: cudf.core.column.ColumnBase, rhs: cudf.core.column.ColumnBase
):
try:
assert_column_memory_eq(lhs, rhs)
except AssertionError:
return
raise AssertionError("lhs and rhs holds the same memory.")
def _create_pandas_series_float64_default(
data=None, index=None, dtype=None, *args, **kwargs
):
# Wrapper around pd.Series using a float64
# default dtype for empty data to silence warnings.
# TODO: Remove this in pandas-2.0 upgrade
if dtype is None and (
data is None or (not is_scalar(data) and len(data) == 0)
):
dtype = "float64"
return pd.Series(data=data, index=index, dtype=dtype, *args, **kwargs)
def _create_cudf_series_float64_default(
data=None, index=None, dtype=None, *args, **kwargs
):
# Wrapper around cudf.Series using a float64
# default dtype for empty data to silence warnings.
# TODO: Remove this in pandas-2.0 upgrade
if dtype is None and (
data is None or (not is_scalar(data) and len(data) == 0)
):
dtype = "float64"
return cudf.Series(data=data, index=index, dtype=dtype, *args, **kwargs)
parametrize_numeric_dtypes_pairwise = pytest.mark.parametrize(
"left_dtype,right_dtype",
list(itertools.combinations_with_replacement(NUMERIC_TYPES, 2)),
)
@contextmanager
def expect_warning_if(condition, warning=FutureWarning, *args, **kwargs):
"""Catch a warning using pytest.warns if the expect_warning is True.
All arguments are forwarded to pytest.warns if expect_warning is True.
"""
if condition:
with pytest.warns(warning, *args, **kwargs):
yield
else:
yield
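# Illustrative usage (not part of the original module): assert that a
# FutureWarning is raised only when the condition holds, while still running
# the body unconditionally (``some_condition`` and ``call_that_may_warn`` are
# hypothetical names for the example):
#
#     with expect_warning_if(some_condition, FutureWarning, match="deprecated"):
#         result = call_that_may_warn()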
def sv_to_udf_str(sv):
"""
Cast a string_view object to a udf_string object
This placeholder function never runs in python
It exists only for numba to have something to replace
with the typing and lowering code below
This is similar conceptually to needing a translation
engine to emit an expression in target language "B" when
there is no equivalent in the source language "A" to
translate from. This function effectively defines the
expression in language "A" and the associated typing
and lowering describe the translation process, despite
the expression having no meaning in language "A"
"""
pass
@cuda_decl_registry.register_global(sv_to_udf_str)
class StringViewToUDFStringDecl(AbstractTemplate):
def generic(self, args, kws):
if isinstance(args[0], StringView) and len(args) == 1:
return nb_signature(udf_string, string_view)
@cuda_lower(sv_to_udf_str, string_view)
def sv_to_udf_str_testing_lowering(context, builder, sig, args):
return cast_string_view_to_udf_string(
context, builder, sig.args[0], sig.return_type, args[0]
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/testing/testing.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from __future__ import annotations
from typing import Union
import cupy as cp
import numpy as np
import pandas as pd
import cudf
from cudf._lib.unary import is_nan
from cudf.api.types import (
is_categorical_dtype,
is_decimal_dtype,
is_interval_dtype,
is_list_dtype,
is_numeric_dtype,
is_string_dtype,
is_struct_dtype,
)
from cudf.core.missing import NA, NaT
def dtype_can_compare_equal_to_other(dtype):
# return True if values of this dtype can compare
# as equal to equal values of a different dtype
return not (
is_string_dtype(dtype)
or is_list_dtype(dtype)
or is_struct_dtype(dtype)
or is_decimal_dtype(dtype)
or is_interval_dtype(dtype)
)
def _check_isinstance(left, right, obj):
if not isinstance(left, obj):
raise AssertionError(
f"{obj} Expected type {obj}, found {type(left)} instead"
)
elif not isinstance(right, obj):
raise AssertionError(
f"{obj} Expected type {obj}, found {type(right)} instead"
)
def raise_assert_detail(obj, message, left, right, diff=None):
msg = f"""{obj} are different
{message}
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def _check_types(
left, right, check_categorical=True, exact="equiv", obj="Index"
):
if not exact or exact == "equiv":
if (
isinstance(left, cudf.RangeIndex)
and isinstance(
right,
(
cudf.Int8Index,
cudf.Int16Index,
cudf.Int32Index,
cudf.Int64Index,
),
)
) or (
isinstance(right, cudf.RangeIndex)
and isinstance(
left,
(
cudf.Int8Index,
cudf.Int16Index,
cudf.Int32Index,
cudf.Int64Index,
),
)
):
return
if type(left) != type(right):
raise_assert_detail(
obj, "Class types are different", f"{type(left)}", f"{type(right)}"
)
if (
exact
and not isinstance(left, cudf.MultiIndex)
and is_categorical_dtype(left)
):
if left.dtype != right.dtype:
raise_assert_detail(
obj, "Categorical difference", f"{left}", f"{right}"
)
def assert_column_equal(
left,
right,
check_dtype=True,
check_column_type="equiv",
check_less_precise=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
rtol=1e-05,
atol=1e-08,
obj="ColumnBase",
):
"""
Check that left and right columns are equal
This function is intended to compare two columns and output
any differences. Additional parameters allow varying the strictness
of the equality checks performed.
Parameters
----------
left : Column
left Column to compare
right : Column
right Column to compare
check_dtype : bool, default True
Whether to check the Column dtype is identical.
check_column_type : bool or {'equiv'}, default 'equiv'
        Whether to check the column class, dtype and
        inferred_type are identical. This check is currently
        not performed; the parameter exists for pandas compatibility.
check_less_precise : bool or int, default False
Not yet supported
check_exact : bool, default False
        Whether to compare numbers exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like values while ignoring dtype differences.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals
rtol : float, default 1e-5
Relative tolerance. Only used when `check_exact` is False.
atol : float, default 1e-8
Absolute tolerance. Only used when `check_exact` is False.
obj : str, default 'ColumnBase'
Specify object name being compared, internally used to
show appropriate assertion message.
"""
if check_dtype is True:
if (
is_categorical_dtype(left)
and is_categorical_dtype(right)
and not check_categorical
):
pass
else:
if type(left) != type(right) or left.dtype != right.dtype:
msg1 = f"{left.dtype}"
msg2 = f"{right.dtype}"
raise_assert_detail(obj, "Dtypes are different", msg1, msg2)
else:
if left.null_count == len(left) and right.null_count == len(right):
return True
if check_datetimelike_compat:
if np.issubdtype(left.dtype, np.datetime64):
right = right.astype(left.dtype)
elif np.issubdtype(right.dtype, np.datetime64):
left = left.astype(right.dtype)
if np.issubdtype(left.dtype, np.datetime64):
if not left.equals(right):
raise AssertionError(
f"[datetimelike_compat=True] {left.values} "
f"is not equal to {right.values}."
)
return
if check_exact and check_categorical:
if is_categorical_dtype(left) and is_categorical_dtype(right):
left_cat = left.categories
right_cat = right.categories
if check_category_order:
assert_index_equal(
left_cat,
right_cat,
exact=check_dtype,
check_exact=True,
check_categorical=False,
rtol=rtol,
atol=atol,
)
assert_column_equal(
left.codes,
right.codes,
check_dtype=check_dtype,
check_exact=True,
check_categorical=False,
check_category_order=False,
rtol=rtol,
atol=atol,
)
if left.ordered != right.ordered:
msg1 = f"{left.ordered}"
msg2 = f"{right.ordered}"
raise_assert_detail(
f"{obj} category", "Orders are different", msg1, msg2
)
if (
not check_dtype
and is_categorical_dtype(left)
and is_categorical_dtype(right)
):
left = left.astype(left.categories.dtype)
right = right.astype(right.categories.dtype)
columns_equal = False
if left.size == right.size == 0:
columns_equal = True
elif not (
(
not dtype_can_compare_equal_to_other(left.dtype)
and is_numeric_dtype(right)
)
or (
is_numeric_dtype(left)
and not dtype_can_compare_equal_to_other(right)
)
):
try:
# nulls must be in the same places for all dtypes
columns_equal = cp.all(
left.isnull().values == right.isnull().values
)
if columns_equal and not check_exact and is_numeric_dtype(left):
# non-null values must be the same
columns_equal = cp.allclose(
left.apply_boolean_mask(
left.isnull().unary_operator("not")
).values,
right.apply_boolean_mask(
right.isnull().unary_operator("not")
).values,
)
if columns_equal and (
left.dtype.kind == right.dtype.kind == "f"
):
columns_equal = cp.all(
is_nan(left).values == is_nan(right).values
)
else:
columns_equal = left.equals(right)
except TypeError as e:
if str(e) != "Categoricals can only compare with the same type":
raise e
else:
columns_equal = False
if is_categorical_dtype(left) and is_categorical_dtype(right):
left = left.astype(left.categories.dtype)
right = right.astype(right.categories.dtype)
if not columns_equal:
ldata = str([val for val in left.to_pandas(nullable=True)])
rdata = str([val for val in right.to_pandas(nullable=True)])
try:
diff = 0
for i in range(left.size):
if not null_safe_scalar_equals(left[i], right[i]):
diff += 1
diff = diff * 100.0 / left.size
except BaseException:
diff = 100.0
raise_assert_detail(
obj,
f"values are different ({np.round(diff, 5)} %)",
            ldata,
            rdata,
)
def null_safe_scalar_equals(left, right):
if left in {NA, NaT, np.nan} or right in {NA, NaT, np.nan}:
return left is right
return left == right
def assert_index_equal(
left,
right,
exact="equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = False,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1e-5,
atol: float = 1e-8,
obj: str = "Index",
):
"""
Check that left and right Index are equal
This function is intended to compare two Index and output
any differences. Additional parameters allow varying the strictness
of the equality checks performed.
Parameters
----------
left : Index
left Index to compare
right : Index
right Index to compare
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted
for Int8Index, Int16Index, Int32Index, Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Not yet supported
    check_exact : bool, default True
        Whether to compare numbers exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as
well as their values.
If True, both indexes must contain the same elements,
in the same order.
If False, both indexes must contain the same elements,
but in any order.
rtol : float, default 1e-5
Relative tolerance. Only used when `check_exact` is False.
atol : float, default 1e-8
Absolute tolerance. Only used when `check_exact` is False.
obj : str, default 'Index'
Specify object name being compared, internally used to
show appropriate assertion message.
Examples
--------
>>> import cudf
    >>> id1 = cudf.Index([1, 2, 3, 4], name="a")
    >>> id2 = cudf.Index([1, 2, 3, 5], name="b")
>>> cudf.testing.assert_index_equal(id1, id2)
......
......
AssertionError: ColumnBase are different
<BLANKLINE>
values are different (25.0 %)
[left]: [1 2 3 4]
[right]: [1 2 3 5]
>>> id2 = cudf.Index([1, 2, 3, 4], name="b")
>>> cudf.testing.assert_index_equal(id1, id2)
......
......
AssertionError: Index are different
<BLANKLINE>
name mismatch
[left]: a
[right]: b
This will pass without any hitch:
>>> id2 = cudf.Index([1, 2, 3, 4], name="a")
>>> cudf.testing.assert_index_equal(id1, id2)
"""
# instance validation
_check_isinstance(left, right, cudf.BaseIndex)
_check_types(
left, right, exact=exact, check_categorical=check_categorical, obj=obj
)
if len(left) != len(right):
raise_assert_detail(
obj, "lengths are different", f"{len(left)}", f"{len(right)}"
)
# If order doesn't matter then sort the index entries
if not check_order:
left = left.sort_values()
right = right.sort_values()
if isinstance(left, cudf.MultiIndex):
if left.nlevels != right.nlevels:
raise AssertionError(
"Number of levels mismatch, "
f"left has {left.nlevels} levels and right has {right.nlevels}"
)
for level in range(left.nlevels):
llevel = cudf.Index(left._columns[level], name=left.names[level])
rlevel = cudf.Index(right._columns[level], name=right.names[level])
mul_obj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=check_exact,
check_names=check_names,
check_exact=check_exact,
check_less_precise=check_less_precise,
check_order=check_order,
rtol=rtol,
atol=atol,
obj=mul_obj,
)
return
assert_column_equal(
left._columns[0],
right._columns[0],
check_dtype=exact,
check_exact=check_exact,
check_categorical=check_categorical,
obj=obj,
)
# metadata comparison
if check_names and (left.name != right.name):
raise_assert_detail(
obj, "name mismatch", f"{left.name}", f"{right.name}"
)
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
rtol=1e-5,
atol=1e-8,
obj="Series",
):
"""
Check that left and right Series are equal
This function is intended to compare two Series and output
any differences. Additional parameters allow varying the strictness
of the equality checks performed.
Parameters
----------
left : Series
left Series to compare
right : Series
right Series to compare
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the series class, dtype and
        inferred_type are identical. This check is currently
        not performed; the parameter exists for pandas compatibility.
check_less_precise : bool or int, default False
Not yet supported
check_names : bool, default True
Whether to check that the names attribute for both the index
and column attributes of the Series is identical.
check_exact : bool, default False
        Whether to compare numbers exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like values while ignoring dtype differences.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals
rtol : float, default 1e-5
Relative tolerance. Only used when `check_exact` is False.
atol : float, default 1e-8
Absolute tolerance. Only used when `check_exact` is False.
obj : str, default 'Series'
Specify object name being compared, internally used to
show appropriate assertion message.
Examples
--------
>>> import cudf
>>> sr1 = cudf.Series([1, 2, 3, 4], name="a")
>>> sr2 = cudf.Series([1, 2, 3, 5], name="b")
>>> cudf.testing.assert_series_equal(sr1, sr2)
......
......
AssertionError: ColumnBase are different
<BLANKLINE>
values are different (25.0 %)
[left]: [1 2 3 4]
[right]: [1 2 3 5]
>>> sr2 = cudf.Series([1, 2, 3, 4], name="b")
>>> cudf.testing.assert_series_equal(sr1, sr2)
......
......
AssertionError: Series are different
<BLANKLINE>
name mismatch
[left]: a
[right]: b
This will pass without any hitch:
>>> sr2 = cudf.Series([1, 2, 3, 4], name="a")
>>> cudf.testing.assert_series_equal(sr1, sr2)
"""
# instance validation
_check_isinstance(left, right, cudf.Series)
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
assert_column_equal(
left._column,
right._column,
check_dtype=check_dtype,
check_column_type=check_series_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_category_order=check_category_order,
rtol=rtol,
atol=atol,
)
# metadata comparison
if check_names and (left.name != right.name):
raise_assert_detail(
obj, "name mismatch", f"{left.name}", f"{right.name}"
)
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
rtol=1e-5,
atol=1e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal
This function is intended to compare two DataFrame and output
any differences. Additional parameters allow varying the strictness
of the equality checks performed.
Parameters
----------
left : DataFrame
left DataFrame to compare
right : DataFrame
right DataFrame to compare
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
    check_column_type : bool or {'equiv'}, default 'equiv'
        Whether to check the column class, dtype and
        inferred_type are identical. This check is currently
        not performed; the parameter exists for pandas compatibility.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_names : bool, default True
Whether to check that the names attribute for both the index and
column attributes of the DataFrame is identical.
    check_exact : bool, default False
        Whether to compare numbers exactly.
    by_blocks : bool, default False
        Not supported
    check_datetimelike_compat : bool, default False
        Compare datetime-like values while ignoring dtype differences.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective
rows (same as in columns) - same labels must be with the same data.
rtol : float, default 1e-5
Relative tolerance. Only used when `check_exact` is False.
atol : float, default 1e-8
Absolute tolerance. Only used when `check_exact` is False.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to
show appropriate assertion message.
Examples
--------
>>> import cudf
>>> df1 = cudf.DataFrame({"a":[1, 2], "b":[1.0, 2.0]}, index=[1, 2])
>>> df2 = cudf.DataFrame({"a":[1, 2], "b":[1.0, 2.0]}, index=[2, 3])
>>> cudf.testing.assert_frame_equal(df1, df2)
......
......
AssertionError: ColumnBase are different
<BLANKLINE>
values are different (100.0 %)
[left]: [1 2]
[right]: [2 3]
>>> df2 = cudf.DataFrame({"a":[1, 2], "c":[1.0, 2.0]}, index=[1, 2])
>>> cudf.testing.assert_frame_equal(df1, df2)
......
......
AssertionError: DataFrame.columns are different
<BLANKLINE>
DataFrame.columns values are different (50.0 %)
[left]: Index(['a', 'b'], dtype='object')
    [right]: Index(['a', 'c'], dtype='object')
>>> df2 = cudf.DataFrame({"a":[1, 2], "b":[1.0, 3.0]}, index=[1, 2])
>>> cudf.testing.assert_frame_equal(df1, df2)
......
......
AssertionError: Column name="b" are different
<BLANKLINE>
values are different (50.0 %)
[left]: [1. 2.]
[right]: [1. 3.]
This will pass without any hitch:
>>> df2 = cudf.DataFrame({"a":[1, 2], "b":[1.0, 2.0]}, index=[1, 2])
>>> cudf.testing.assert_frame_equal(df1, df2)
"""
_check_isinstance(left, right, cudf.DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# shape comparison
if left.shape != right.shape:
raise AssertionError("left and right shape mismatch")
if check_like:
left, right = left.reindex(index=right.index), right
right = right[list(left._data.names)]
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
pd.testing.assert_index_equal(
left._data.to_pandas_index(),
right._data.to_pandas_index(),
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
for col in left._column_names:
assert_column_equal(
left._data[col],
right._data[col],
check_dtype=check_dtype,
check_exact=check_exact,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f'Column name="{col}"',
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/testing/__init__.py
|
# Copyright (c) 2020, NVIDIA CORPORATION.
from cudf.testing.testing import (
assert_frame_equal,
assert_index_equal,
assert_series_equal,
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/testing/dataset_generator.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
# This module is for generating "synthetic" datasets. It was originally
# designed for testing filtered reading. Generally, it should be useful
# if you want to generate data where certain phenomena (e.g., cardinality)
# are exaggerated.
import copy
import random
import string
import uuid
from multiprocessing import Pool
import mimesis
import numpy as np
import pandas as pd
import pyarrow as pa
from mimesis import Generic
from pyarrow import parquet as pq
import cudf
from cudf.utils.dtypes import np_to_pa_dtype
class ColumnParameters:
"""Parameters for generating column of data
Attributes
----------
cardinality : int or None
Size of a random set of values that generated data is sampled from.
The values in the random set are derived from the given generator.
If cardinality is None, the Iterable returned by the given generator
is invoked for each value to be generated.
    null_frequency : float, default 0.1
Probability of a generated value being null
generator : Callable
Function for generating random data. It is passed a Mimesis Generic
provider and returns an Iterable that generates data.
is_sorted : bool
Sort this column. Columns are sorted in same order as ColumnParameters
instances stored in column_params of Parameters. If there are one or
more columns marked as sorted, the generated PyArrow Table will be
converted to a Pandas DataFrame to do the sorting. This may implicitly
convert numbers to floats in the presence of nulls.
dtype : optional
a numpy dtype to control the format of the data
"""
def __init__(
self,
cardinality=100,
null_frequency=0.1,
generator=lambda g: [g.address.country for _ in range(100)],
is_sorted=True,
dtype=None,
):
self.cardinality = cardinality
self.null_frequency = null_frequency
self.generator = generator
self.is_sorted = is_sorted
self.dtype = dtype
class Parameters:
"""Parameters for random dataset generation
Attributes
----------
num_rows : int
Number of rows to generate
    column_parameters : List[ColumnParameters]
        ColumnParameters for each column
seed : int or None, default None
Seed for random data generation
"""
def __init__(
self,
num_rows=2048,
column_parameters=None,
seed=None,
):
self.num_rows = num_rows
if column_parameters is None:
column_parameters = []
self.column_parameters = column_parameters
self.seed = seed
def _write(tbl, path, format):
if format["name"] == "parquet":
if isinstance(tbl, pa.Table):
pq.write_table(tbl, path, row_group_size=format["row_group_size"])
elif isinstance(tbl, pd.DataFrame):
tbl.to_parquet(path, row_group_size=format["row_group_size"])
def _generate_column(column_params, num_rows):
# If cardinality is specified, we create a set to sample from.
# Otherwise, we simply use the given generator to generate each value.
if column_params.cardinality is not None:
# Construct set of values to sample from where
# set size = cardinality
if (
isinstance(column_params.dtype, str)
and column_params.dtype == "category"
):
vals = pa.array(
column_params.generator,
size=column_params.cardinality,
safe=False,
)
return pa.DictionaryArray.from_arrays(
dictionary=vals,
indices=np.random.randint(
low=0, high=len(vals), size=num_rows
),
mask=np.random.choice(
[True, False],
size=num_rows,
p=[
column_params.null_frequency,
1 - column_params.null_frequency,
],
)
if column_params.null_frequency > 0.0
else None,
)
if hasattr(column_params.dtype, "to_arrow"):
arrow_type = column_params.dtype.to_arrow()
elif column_params.dtype is not None:
arrow_type = np_to_pa_dtype(cudf.dtype(column_params.dtype))
else:
arrow_type = None
if isinstance(column_params.dtype, cudf.StructDtype):
vals = pa.StructArray.from_arrays(
column_params.generator,
names=column_params.dtype.fields.keys(),
mask=pa.array(
np.random.choice(
[True, False],
size=num_rows,
p=[
column_params.null_frequency,
1 - column_params.null_frequency,
],
)
)
if column_params.null_frequency > 0.0
else None,
)
return vals
elif not isinstance(arrow_type, pa.lib.Decimal128Type):
vals = pa.array(
column_params.generator,
size=column_params.cardinality,
safe=False,
type=arrow_type,
)
vals = pa.array(
np.random.choice(column_params.generator, size=num_rows)
if isinstance(arrow_type, pa.lib.Decimal128Type)
else np.random.choice(vals, size=num_rows),
mask=np.random.choice(
[True, False],
size=num_rows,
p=[
column_params.null_frequency,
1 - column_params.null_frequency,
],
)
if column_params.null_frequency > 0.0
else None,
size=num_rows,
safe=False,
type=None
if isinstance(arrow_type, pa.lib.Decimal128Type)
else arrow_type,
)
if isinstance(arrow_type, pa.lib.Decimal128Type):
vals = vals.cast(arrow_type, safe=False)
return vals
else:
# Generate data for current column
return pa.array(
column_params.generator,
mask=np.random.choice(
[True, False],
size=num_rows,
p=[
column_params.null_frequency,
1 - column_params.null_frequency,
],
)
if column_params.null_frequency > 0.0
else None,
size=num_rows,
safe=False,
)
def generate(
path,
parameters,
format=None,
use_threads=True,
):
"""
Generate dataset using given parameters and write to given format
Parameters
----------
path : str or file-like object
Path to write to
parameters : Parameters
Parameters specifying how to randomly generate data
    format : Dict
        Format to write
    use_threads : bool, default True
        If True, generate each column in a separate process via a
        multiprocessing ``Pool``; otherwise generate columns serially
"""
if format is None:
format = {"name": "parquet", "row_group_size": 64}
df = get_dataframe(parameters, use_threads)
# Write
_write(df, path, format)
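# Illustrative usage sketch (not part of the original module; the output file
# name and the integer generator lambda are made up for the example):
#
#     params = Parameters(
#         num_rows=100,
#         column_parameters=[
#             # default generator: mimesis country names, sorted, 10% nulls
#             ColumnParameters(cardinality=10, null_frequency=0.1),
#             ColumnParameters(
#                 cardinality=50,
#                 null_frequency=0.0,
#                 generator=lambda: np.random.randint(0, 100, size=50),
#                 is_sorted=False,
#                 dtype="int64",
#             ),
#         ],
#         seed=42,
#     )
#     generate("synthetic.parquet", params, format={"name": "parquet", "row_group_size": 32})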
def get_dataframe(parameters, use_threads):
# Initialize seeds
if parameters.seed is not None:
np.random.seed(parameters.seed)
# For each column, use a generic Mimesis producer to create an Iterable
# for generating data
for i, column_params in enumerate(parameters.column_parameters):
if column_params.dtype is None:
column_params.generator = column_params.generator(
Generic("en", seed=parameters.seed)
)
else:
column_params.generator = column_params.generator()
# Get schema for each column
table_fields = []
for i, column_params in enumerate(parameters.column_parameters):
if (
isinstance(column_params.dtype, str)
and column_params.dtype == "category"
):
arrow_type = pa.dictionary(
index_type=pa.int64(),
value_type=np_to_pa_dtype(
cudf.dtype(type(next(iter(column_params.generator))))
),
)
elif hasattr(column_params.dtype, "to_arrow"):
arrow_type = column_params.dtype.to_arrow()
else:
arrow_type = np_to_pa_dtype(
cudf.dtype(type(next(iter(column_params.generator))))
if column_params.dtype is None
else column_params.dtype
)
table_fields.append(
pa.field(
name=str(i),
type=arrow_type,
nullable=column_params.null_frequency > 0,
)
)
schema = pa.schema(table_fields)
# Initialize column data and which columns should be sorted
column_data = [None] * len(parameters.column_parameters)
columns_to_sort = [
str(i)
for i, column_params in enumerate(parameters.column_parameters)
if column_params.is_sorted
]
# Generate data
if not use_threads:
for i, column_params in enumerate(parameters.column_parameters):
column_data[i] = _generate_column(
column_params, parameters.num_rows
)
else:
pool = Pool(pa.cpu_count())
column_data = pool.starmap(
_generate_column,
[
(column_params, parameters.num_rows)
for i, column_params in enumerate(parameters.column_parameters)
],
)
pool.close()
pool.join()
# Convert to Pandas DataFrame and sort columns appropriately
tbl = pa.Table.from_arrays(
column_data,
schema=schema,
)
if columns_to_sort:
tbl = tbl.to_pandas()
tbl = tbl.sort_values(columns_to_sort)
tbl = pa.Table.from_pandas(tbl, schema)
return tbl
def rand_dataframe(
dtypes_meta, rows, seed=random.randint(0, 2**32 - 1), use_threads=True
):
"""
Generates a random table.
Parameters
----------
dtypes_meta : List of dict
Specifies list of dtype meta data. dtype meta data should
be a dictionary of the form example:
{"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}
`"str"` dtype can contain an extra key `max_string_length` to
control the maximum size of the strings being generated in each row.
If not specified, it will default to 1000.
rows : int
Specifies the number of rows to be generated.
seed : int
Specifies the `seed` value to be utilized by all downstream
random data generation APIs.
use_threads : bool
Indicates whether to use threads pools to build the columns
Returns
-------
PyArrow Table
A Table with columns of corresponding dtypes mentioned in `dtypes_meta`
"""
# Apply seed
random.seed(seed)
np.random.seed(seed)
mimesis.random.random.seed(seed)
column_params = []
for meta in dtypes_meta:
dtype = copy.deepcopy(meta["dtype"])
null_frequency = copy.deepcopy(meta["null_frequency"])
cardinality = copy.deepcopy(meta["cardinality"])
if dtype == "list":
lists_max_length = meta["lists_max_length"]
nesting_max_depth = meta["nesting_max_depth"]
value_type = meta["value_type"]
nesting_depth = np.random.randint(1, nesting_max_depth)
dtype = cudf.core.dtypes.ListDtype(value_type)
# Determining the `dtype` from the `value_type`
# and the nesting_depth
i = 1
while i < nesting_depth:
dtype = cudf.core.dtypes.ListDtype(dtype)
i += 1
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=list_generator(
dtype=value_type,
size=cardinality,
nesting_depth=nesting_depth,
lists_max_length=lists_max_length,
),
is_sorted=False,
dtype=dtype,
)
)
elif dtype == "struct":
nesting_max_depth = meta["nesting_max_depth"]
max_types_at_each_level = meta["max_types_at_each_level"]
max_null_frequency = meta["max_null_frequency"]
nesting_depth = np.random.randint(1, nesting_max_depth)
structDtype = create_nested_struct_type(
max_types_at_each_level=max_types_at_each_level,
nesting_level=nesting_depth,
)
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=struct_generator(
dtype=structDtype,
cardinality=cardinality,
size=rows,
max_null_frequency=max_null_frequency,
),
is_sorted=False,
dtype=structDtype,
)
)
elif dtype == "decimal64":
max_precision = meta.get(
"max_precision", cudf.Decimal64Dtype.MAX_PRECISION
)
precision = np.random.randint(1, max_precision)
scale = np.random.randint(0, precision)
dtype = cudf.Decimal64Dtype(precision=precision, scale=scale)
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=decimal_generator(dtype=dtype, size=cardinality),
is_sorted=False,
dtype=dtype,
)
)
elif dtype == "decimal32":
max_precision = meta.get(
"max_precision", cudf.Decimal32Dtype.MAX_PRECISION
)
precision = np.random.randint(1, max_precision)
scale = np.random.randint(0, precision)
dtype = cudf.Decimal32Dtype(precision=precision, scale=scale)
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=decimal_generator(dtype=dtype, size=cardinality),
is_sorted=False,
dtype=dtype,
)
)
elif dtype == "decimal128":
max_precision = meta.get(
"max_precision", cudf.Decimal128Dtype.MAX_PRECISION
)
precision = np.random.randint(1, max_precision)
scale = np.random.randint(0, precision)
dtype = cudf.Decimal128Dtype(precision=precision, scale=scale)
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=decimal_generator(dtype=dtype, size=cardinality),
is_sorted=False,
dtype=dtype,
)
)
elif dtype == "category":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=lambda cardinality=cardinality: [
_unique_string() for _ in range(cardinality)
],
is_sorted=False,
dtype="category",
)
)
else:
dtype = cudf.dtype(dtype)
if dtype.kind in ("i", "u"):
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=int_generator(
dtype=dtype,
size=cardinality,
min_bound=meta.get("min_bound", None),
max_bound=meta.get("max_bound", None),
),
is_sorted=False,
dtype=dtype,
)
)
elif dtype.kind == "f":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=float_generator(
dtype=dtype,
size=cardinality,
min_bound=meta.get("min_bound", None),
max_bound=meta.get("max_bound", None),
),
is_sorted=False,
dtype=dtype,
)
)
elif dtype.kind in ("U", "O"):
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=lambda cardinality=cardinality: [
_generate_string(
string.printable,
np.random.randint(
low=0,
high=meta.get("max_string_length", 1000),
size=1,
)[0],
)
for _ in range(cardinality)
],
is_sorted=False,
dtype=dtype,
)
)
elif dtype.kind == "M":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=datetime_generator(
dtype=dtype,
size=cardinality,
min_bound=meta.get("min_bound", None),
max_bound=meta.get("max_bound", None),
),
is_sorted=False,
dtype=cudf.dtype(dtype),
)
)
elif dtype.kind == "m":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=timedelta_generator(
dtype=dtype,
size=cardinality,
min_bound=meta.get("min_bound", None),
max_bound=meta.get("max_bound", None),
),
is_sorted=False,
dtype=cudf.dtype(dtype),
)
)
elif dtype.kind == "b":
column_params.append(
ColumnParameters(
cardinality=cardinality,
null_frequency=null_frequency,
generator=boolean_generator(cardinality),
is_sorted=False,
dtype=cudf.dtype(dtype),
)
)
else:
raise TypeError(f"Unsupported dtype: {dtype}")
# TODO: Add List column support once
# https://github.com/rapidsai/cudf/pull/6075
# is merged.
df = get_dataframe(
Parameters(
num_rows=rows,
column_parameters=column_params,
seed=seed,
),
use_threads=use_threads,
)
return df
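# Illustrative usage (not part of the original module): build a pyarrow Table
# with one nullable int64 column and one short-string column, then hand it to
# cudf:
#
#     tbl = rand_dataframe(
#         dtypes_meta=[
#             {"dtype": "int64", "null_frequency": 0.1, "cardinality": 100},
#             {"dtype": "str", "null_frequency": 0.4, "cardinality": 10,
#              "max_string_length": 20},
#         ],
#         rows=1000,
#         seed=0,
#     )
#     gdf = cudf.DataFrame.from_arrow(tbl)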
def int_generator(dtype, size, min_bound=None, max_bound=None):
"""
Generator for int data
"""
if min_bound is not None and max_bound is not None:
low, high = min_bound, max_bound
else:
iinfo = np.iinfo(dtype)
low, high = iinfo.min, iinfo.max
return lambda: np.random.randint(
low=low,
high=high,
size=size,
dtype=dtype,
)
def float_generator(dtype, size, min_bound=None, max_bound=None):
"""
Generator for float data
"""
if min_bound is not None and max_bound is not None:
low, high = min_bound, max_bound
return lambda: np.random.uniform(
low=low,
high=high,
size=size,
)
else:
finfo = np.finfo(dtype)
return (
lambda: np.random.uniform(
low=finfo.min / 2,
high=finfo.max / 2,
size=size,
)
* 2
)
def datetime_generator(dtype, size, min_bound=None, max_bound=None):
"""
Generator for datetime data
"""
if min_bound is not None and max_bound is not None:
low, high = min_bound, max_bound
else:
iinfo = np.iinfo("int64")
low, high = iinfo.min + 1, iinfo.max
return lambda: np.random.randint(
low=np.datetime64(low, "ns").astype(dtype).astype("int"),
high=np.datetime64(high, "ns").astype(dtype).astype("int"),
size=size,
)
def timedelta_generator(dtype, size, min_bound=None, max_bound=None):
"""
Generator for timedelta data
"""
if min_bound is not None and max_bound is not None:
low, high = min_bound, max_bound
else:
iinfo = np.iinfo("int64")
low, high = iinfo.min + 1, iinfo.max
return lambda: np.random.randint(
low=np.timedelta64(low, "ns").astype(dtype).astype("int"),
high=np.timedelta64(high, "ns").astype(dtype).astype("int"),
size=size,
)
def boolean_generator(size):
"""
Generator for bool data
"""
return lambda: np.random.choice(a=[False, True], size=size)
def decimal_generator(dtype, size):
max_integral = 10 ** (dtype.precision - dtype.scale) - 1
max_float = (10**dtype.scale - 1) if dtype.scale != 0 else 0
return lambda: (
np.random.uniform(
low=-max_integral,
high=max_integral + (max_float / 10**dtype.scale),
size=size,
)
)
def get_values_for_nested_data(dtype, lists_max_length=None, size=None):
"""
Returns list of values based on dtype.
"""
if size is None:
cardinality = np.random.randint(0, lists_max_length)
else:
cardinality = size
dtype = cudf.dtype(dtype)
if dtype.kind in ("i", "u"):
values = int_generator(dtype=dtype, size=cardinality)()
elif dtype.kind == "f":
values = float_generator(dtype=dtype, size=cardinality)()
elif dtype.kind in ("U", "O"):
values = [
_generate_string(
string.printable,
100,
)
for _ in range(cardinality)
]
elif dtype.kind == "M":
values = datetime_generator(dtype=dtype, size=cardinality)().astype(
dtype
)
elif dtype.kind == "m":
values = timedelta_generator(dtype=dtype, size=cardinality)().astype(
dtype
)
elif dtype.kind == "b":
values = boolean_generator(cardinality)().astype(dtype)
else:
raise TypeError(f"Unsupported dtype: {dtype}")
return values
def make_lists(dtype, lists_max_length, nesting_depth, top_level_list):
"""
Helper to create random list of lists with `nesting_depth` and
specified value type `dtype`.
"""
nesting_depth -= 1
if nesting_depth >= 0:
L = np.random.randint(1, lists_max_length)
for i in range(L):
top_level_list.append(
make_lists(
dtype=dtype,
lists_max_length=lists_max_length,
nesting_depth=nesting_depth,
top_level_list=[],
)
)
else:
top_level_list = get_values_for_nested_data(
dtype=dtype, lists_max_length=lists_max_length
)
# To ensure numpy arrays are not passed as input to
# list constructor, returning a python list object here.
if isinstance(top_level_list, np.ndarray):
top_level_list = top_level_list.tolist()
return top_level_list
def make_array_for_struct(dtype, cardinality, size, max_null_frequency):
"""
Helper to create a pa.array with `size` and `dtype`
for a `StructArray`.
"""
null_frequency = np.random.uniform(low=0, high=max_null_frequency)
local_cardinality = max(np.random.randint(low=0, high=cardinality), 1)
data = get_values_for_nested_data(
dtype=dtype.type.to_pandas_dtype(), size=local_cardinality
)
vals = np.random.choice(data, size=size)
return pa.array(
vals,
mask=np.random.choice(
[True, False],
size=size,
p=[null_frequency, 1 - null_frequency],
)
if null_frequency > 0.0
else None,
size=size,
safe=False,
type=dtype.type,
)
def get_nested_lists(dtype, size, nesting_depth, lists_max_length):
"""
Returns a list of nested lists with random nesting
depth and random nested lists length.
"""
list_of_lists = []
while len(list_of_lists) <= size:
list_of_lists.extend(
make_lists(
dtype=dtype,
lists_max_length=lists_max_length,
nesting_depth=nesting_depth,
top_level_list=[],
)
)
return list_of_lists
def get_nested_structs(dtype, cardinality, size, max_null_frequency):
"""
Returns a list of arrays with random data
corresponding to the dtype provided.
``dtype`` here should be a ``cudf.StructDtype``
"""
list_of_arrays = []
for name, col_dtype in dtype.fields.items():
if isinstance(col_dtype, cudf.StructDtype):
result_arrays = get_nested_structs(
col_dtype, cardinality, size, max_null_frequency
)
result_arrays = pa.StructArray.from_arrays(
result_arrays, names=col_dtype.fields.keys()
)
else:
result_arrays = make_array_for_struct(
dtype=dtype._typ[name],
cardinality=cardinality,
size=size,
max_null_frequency=max_null_frequency,
)
list_of_arrays.append(result_arrays)
return list_of_arrays
def list_generator(dtype, size, nesting_depth, lists_max_length):
"""
Generator for list data
"""
return lambda: get_nested_lists(
dtype=dtype,
size=size,
nesting_depth=nesting_depth,
lists_max_length=lists_max_length,
)
def struct_generator(dtype, cardinality, size, max_null_frequency):
"""
Generator for struct data
"""
return lambda: get_nested_structs(
dtype=dtype,
cardinality=cardinality,
size=size,
max_null_frequency=max_null_frequency,
)
def create_nested_struct_type(max_types_at_each_level, nesting_level):
dtypes_list = cudf.utils.dtypes.ALL_TYPES
picked_types = np.random.choice(list(dtypes_list), max_types_at_each_level)
type_dict = {}
for name, type_ in enumerate(picked_types):
if type_ == "struct":
type_dict[str(name)] = create_nested_struct_type(
max_types_at_each_level, nesting_level - 1
)
else:
type_dict[str(name)] = cudf.dtype(type_)
return cudf.StructDtype(type_dict)
def _generate_string(str_seq: str, length: int = 10) -> str:
return "".join(random.choices(str_seq, k=length))
def _unique_string() -> str:
return str(uuid.uuid4()).replace("-", "")
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/indexing_utils.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, List, Tuple, Union
from typing_extensions import TypeAlias
import cudf
from cudf.api.types import (
_is_scalar_or_zero_d_array,
is_bool_dtype,
is_integer,
is_integer_dtype,
)
from cudf.core.copy_types import BooleanMask, GatherMap
class EmptyIndexer:
"""An indexer that will produce an empty result."""
pass
@dataclass
class MapIndexer:
"""An indexer for a gather map."""
key: GatherMap
@dataclass
class MaskIndexer:
"""An indexer for a boolean mask."""
key: BooleanMask
@dataclass
class SliceIndexer:
"""An indexer for a slice."""
key: slice
@dataclass
class ScalarIndexer:
"""An indexer for a scalar value."""
key: GatherMap
IndexingSpec: TypeAlias = Union[
EmptyIndexer, MapIndexer, MaskIndexer, ScalarIndexer, SliceIndexer
]
ColumnLabels: TypeAlias = List[str]
def destructure_iloc_key(
key: Any, frame: Union[cudf.Series, cudf.DataFrame]
) -> tuple[Any, ...]:
"""
Destructure a potentially tuple-typed key into row and column indexers.
Tuple arguments to iloc indexing are treated specially. They are
picked apart into indexers for the row and column. If the number
of entries is less than the number of modes of the frame, missing
entries are slice-expanded.
If the user-provided key is not a tuple, it is treated as if it
were a singleton tuple, and then slice-expanded.
Once this destructuring has occurred, any entries that are
callables are then called with the indexed frame. This should
return a valid indexing object for the rows (respectively
columns), namely one of:
- A boolean mask of the same length as the frame in the given
dimension
- A scalar integer that indexes the frame
- An array-like of integers that index the frame
- A slice that indexes the frame
Integer and slice-based indexing follows usual Python conventions.
Parameters
----------
key
The key to destructure
frame
DataFrame or Series to provide context
Returns
-------
tuple
Indexers with length equal to the dimension of the frame
Raises
------
IndexError
If there are too many indexers, or any individual indexer is a tuple.
"""
n = len(frame.shape)
if isinstance(key, tuple):
# Key potentially indexes rows and columns, slice-expand to
# shape of frame
indexers = key + (slice(None),) * (n - len(key))
if len(indexers) > n:
raise IndexError(
f"Too many indexers: got {len(indexers)} expected {n}"
)
else:
# Key indexes rows, slice-expand to shape of frame
indexers = (key, *(slice(None),) * (n - 1))
indexers = tuple(k(frame) if callable(k) else k for k in indexers)
if any(isinstance(k, tuple) for k in indexers):
raise IndexError(
"Too many indexers: can't have nested tuples in iloc indexing"
)
return indexers
def destructure_dataframe_iloc_indexer(
key: Any, frame: cudf.DataFrame
) -> Tuple[Any, Tuple[bool, ColumnLabels]]:
"""Destructure an index key for DataFrame iloc getitem.
Parameters
----------
key
Key to destructure
frame
DataFrame to provide context context
Returns
-------
tuple
2-tuple of a key for the rows and tuple of
(column_index_is_scalar, column_names) for the columns
Raises
------
TypeError
If the column indexer is invalid
IndexError
If the provided key does not destructure correctly
NotImplementedError
If the requested column indexer repeats columns
"""
rows, cols = destructure_iloc_key(key, frame)
if cols is Ellipsis:
cols = slice(None)
scalar = is_integer(cols)
try:
column_names: ColumnLabels = list(
frame._data.get_labels_by_index(cols)
)
if len(set(column_names)) != len(column_names):
raise NotImplementedError(
"cudf DataFrames do not support repeated column names"
)
except TypeError:
raise TypeError(
"Column indices must be integers, slices, "
"or list-like of integers"
)
if scalar:
assert (
len(column_names) == 1
), "Scalar column indexer should not produce more than one column"
return rows, (scalar, column_names)
def destructure_series_iloc_indexer(key: Any, frame: cudf.Series) -> Any:
"""Destructure an index key for Series iloc getitem.
Parameters
----------
key
Key to destructure
frame
Series for unpacking context
Returns
-------
Single key that will index the rows
"""
(rows,) = destructure_iloc_key(key, frame)
return rows
def parse_row_iloc_indexer(key: Any, n: int) -> IndexingSpec:
"""
Normalize and produce structured information about a row indexer.
Given a row indexer that has already been destructured by
:func:`destructure_iloc_key`, inspect further and produce structured
information for indexing operations to act upon.
Parameters
----------
key
Suitably destructured key for row indexing
n
Length of frame to index
Returns
-------
IndexingSpec
Structured data for indexing. A tag + parsed data.
Raises
------
IndexError
If a valid type of indexer is provided, but it is out of
bounds
TypeError
If the indexing key is otherwise invalid.
"""
if key is Ellipsis:
return SliceIndexer(slice(None))
elif isinstance(key, slice):
return SliceIndexer(key)
elif _is_scalar_or_zero_d_array(key):
return ScalarIndexer(GatherMap(key, n, nullify=False))
else:
key = cudf.core.column.as_column(key)
if isinstance(key, cudf.core.column.CategoricalColumn):
key = key.as_numerical_column(key.codes.dtype)
if is_bool_dtype(key.dtype):
return MaskIndexer(BooleanMask(key, n))
elif len(key) == 0:
return EmptyIndexer()
elif is_integer_dtype(key.dtype):
return MapIndexer(GatherMap(key, n, nullify=False))
else:
raise TypeError(
"Cannot index by location "
f"with non-integer key of type {type(key)}"
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/subword_tokenizer.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
from __future__ import annotations
import warnings
from typing import Union
import cupy as cp
from cudf._lib.nvtext.subword_tokenize import (
Hashed_Vocabulary as cpp_hashed_vocabulary,
subword_tokenize_inmem_hash as cpp_subword_tokenize,
)
def _cast_to_appropriate_type(ar, cast_type):
if cast_type == "cp":
return ar
if cast_type == "pt":
from torch.utils.dlpack import from_dlpack
elif cast_type == "tf":
from tensorflow.experimental.dlpack import from_dlpack
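    # Hand the int32-cast CuPy array to the requested framework through the
    # DLPack protocol so the data stays on the GPU.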
return from_dlpack(ar.astype("int32").toDlpack())
class SubwordTokenizer:
"""
Run CUDA BERT subword tokenizer on cuDF strings column.
Encodes words to token ids using vocabulary from a pretrained
tokenizer.
This function requires about 21x the number of character bytes
in the input strings column as working memory.
Parameters
----------
hash_file : str
Path to hash file containing vocabulary of words with token-ids.
This can be created from the raw vocabulary
using the ``cudf.utils.hash_vocab_utils.hash_vocab`` function
    do_lower_case : bool, Default is True
If set to True, original text will be lowercased before encoding.
Returns
-------
SubwordTokenizer
"""
def __init__(self, hash_file: str, do_lower_case: bool = True):
self.do_lower_case = do_lower_case
self.vocab_file = cpp_hashed_vocabulary(hash_file)
def __call__(
self,
text,
max_length: int,
max_num_rows: int,
add_special_tokens: bool = True,
padding: str = "max_length",
truncation: Union[bool, str] = False,
stride: int = 0,
return_tensors: str = "cp",
return_token_type_ids: bool = False,
):
"""
Run CUDA BERT subword tokenizer on cuDF strings column.
Encodes words to token ids using vocabulary from a
pretrained tokenizer.
Parameters
----------
text : cudf string series
The batch of sequences to be encoded.
max_length : int
Controls the maximum length to use or pad to.
max_num_rows : int
Maximum number of rows for the output token-ids expected to
be generated by the tokenizer.
Used for allocating temporary working memory on the GPU device.
If the output generates a larger number of rows,
behavior is undefined.
This will vary based on stride, truncation, and max_length.
For example, for non-overlapping sequences output rows will be
the same as input rows.
A good default can be twice the max_length
add_special_tokens : bool, optional, defaults to True
Whether or not to encode the sequences with the special tokens
of the BERT classification model
padding : "max_length"
Pad to a maximum length specified with the argument max_length
truncation : bool, defaults to False
True:
Truncate to a maximum length specified with the argument max_length
False or 'do_not_truncate': default
No truncation (Output differs from HuggingFace)
stride : int, optional, defaults to 0
The value of this argument defines the number of
overlapping tokens.
The information about the overlapping tokens is
present in the metadata outputted.
return_tensors : str, {"cp", "pt", "tf"} defaults to "cp"
"cp" : Return cupy cp.ndarray objects
"tf" : Return TensorFlow tf.constant objects
"pt" : Return PyTorch torch.Tensor objects
return_token_type_ids : bool, optional
Only False currently supported
Returns
-------
An encoding with the following fields:
input_ids:(type defined by return_tensors)
A tensor of token ids to be fed to the model.
attention_mask: (type defined by return_tensors)
A tensor of indices specifying which tokens
should be attended to by the model
metadata: (type defined by return_tensors)
Each row contains the index id of the original string and the
first and last index of the token-ids that are non-padded and
non-overlapping
Examples
--------
>>> import cudf
>>> from cudf.utils.hash_vocab_utils import hash_vocab
>>> hash_vocab('bert-base-cased-vocab.txt', 'voc_hash.txt')
>>> from cudf.core.subword_tokenizer import SubwordTokenizer
>>> cudf_tokenizer = SubwordTokenizer('voc_hash.txt',
... do_lower_case=True)
>>> str_series = cudf.Series(['This is the', 'best book'])
>>> tokenizer_output = cudf_tokenizer(str_series,
... max_length=8,
... max_num_rows=len(str_series),
... padding='max_length',
... return_tensors='pt',
... truncation=True)
>>> tokenizer_output['input_ids']
tensor([[ 101, 1142, 1110, 1103, 102, 0, 0, 0],
[ 101, 1436, 1520, 102, 0, 0, 0, 0]],
device='cuda:0',
dtype=torch.int32)
>>> tokenizer_output['attention_mask']
tensor([[1, 1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0]],
device='cuda:0', dtype=torch.int32)
>>> tokenizer_output['metadata']
tensor([[0, 1, 3],
[1, 1, 2]], device='cuda:0', dtype=torch.int32)
"""
if return_token_type_ids:
# raise not currently supported
# Can also return zeros
error_msg = "Returning token_type_ids is currently supported"
raise NotImplementedError(error_msg)
if truncation in (False, "do_not_truncate"):
if add_special_tokens:
error_msg = (
"Adding special tokens is not supported "
f"with truncation = {truncation}. "
)
recommendation = (
"Custom Cupy kernel can potentially "
"be used to add it. For reference "
"see: _bert_add_special_tokens"
)
raise NotImplementedError(error_msg + recommendation)
truncation = False
warning_msg = (
"When truncation is not True, the behavior currently differs "
"from HuggingFace as cudf always returns overflowing tokens"
)
warnings.warn(warning_msg)
if padding != "max_length":
error_msg = (
"Only padding to the provided max_length"
"is currently supported"
)
raise NotImplementedError(error_msg)
if max_length <= stride:
error_msg = "Stride should be less than max_length"
raise ValueError(error_msg)
if return_tensors not in {"cp", "pt", "tf"}:
error_msg = (
"Only cupy(cp), pytorch(pt) and tensorflow(tf) "
"tensors are supported"
)
raise NotImplementedError(error_msg)
stride = max_length - stride
# behavior varies from subword_tokenize but maps with huggingface
input_ids, attention_mask, metadata = cpp_subword_tokenize(
text._column,
self.vocab_file,
max_sequence_length=max_length,
stride=stride,
do_lower=self.do_lower_case,
do_truncate=truncation,
)
tokenizer_output = {
"input_ids": cp.asarray(input_ids).reshape(-1, max_length),
"attention_mask": cp.asarray(attention_mask).reshape(
-1, max_length
),
"metadata": cp.asarray(metadata).reshape(-1, 3),
}
if add_special_tokens:
tokenizer_output = _bert_add_special_tokens(tokenizer_output)
tokenizer_output = {
k: _cast_to_appropriate_type(v, return_tensors)
for k, v in tokenizer_output.items()
}
return tokenizer_output
def _bert_add_special_tokens(token_o):
"""
Adds special tokens (CLS,SEP) which are often used by pre-trained BERT
models to input_ids and adjusts attention_mask and metadata to account
for them.
"""
max_length = token_o["input_ids"].shape[1]
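    # For each row, find the column just past the last non-pad token by
    # scanning the reversed row for its first non-zero entry.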
seq_end_col = max_length - (token_o["input_ids"][:, ::-1] != 0).argmax(1)
# clipping to take overflow into account
seq_end_col = cp.clip(seq_end_col + 1, a_min=None, a_max=max_length - 1)
_bert_add_special_tokens_input_ids(token_o["input_ids"], seq_end_col)
_bert_add_special_tokens_attention_mask(
token_o["attention_mask"], seq_end_col
)
_bert_add_special_tokens_metadata(token_o["metadata"], max_length)
return token_o
def _bert_add_special_tokens_input_ids(input_ids, seq_end_col):
"""
Add token ids for special tokens ([CLS] and [SEP]) to
the start and end of each sequence
"""
# Mark sequence start with [CLS] token mapping to the start of sequence
input_ids[:, 1:-1] = input_ids[:, 0:-2]
input_ids[:, 0] = 101
# Mark end of sequence [SEP]
input_ids[
cp.arange(0, input_ids.shape[0], dtype=cp.uint32), seq_end_col
] = 102
def _bert_add_special_tokens_attention_mask(attention_mask, seq_end_col):
"""
Mark attention mask for special tokens ([CLS] and [SEP]) with 1
"""
# Copy attention masks for all but last two
attention_mask[:, 1:-1] = attention_mask[:, 0:-2]
# Mark [CLS] token with 1
attention_mask[:, 0] = 1
# Mark [SEP] token with 1
attention_mask[
cp.arange(0, attention_mask.shape[0], dtype=cp.uint32), seq_end_col
] = 1
def _bert_add_special_tokens_metadata(metadata, max_length):
"""
Edit metadata to account for the added special tokens ([CLS] and [SEP])
"""
    # shift the start offsets by 1 to account for the inserted [CLS] token
metadata[:, 1] = metadata[:, 1] + 1
# clip done to take overflow into account
metadata[:, 2] = cp.clip(
metadata[:, 2] + 1, a_min=None, a_max=max_length - 2
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/frame.py
|
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
from __future__ import annotations
import copy
import itertools
import operator
import pickle
import warnings
from collections import abc
from typing import (
Any,
Callable,
Dict,
List,
MutableMapping,
Optional,
Tuple,
Union,
)
# TODO: The `numpy` import is needed for typing purposes during doc builds
# only, need to figure out why the `np` alias is insufficient then remove.
import cupy
import numpy
import numpy as np
import pyarrow as pa
from typing_extensions import Self
import cudf
from cudf import _lib as libcudf
from cudf._typing import Dtype
from cudf.api.extensions import no_default
from cudf.api.types import is_bool_dtype, is_dtype_equal, is_scalar
from cudf.core.buffer import acquire_spill_lock
from cudf.core.column import (
ColumnBase,
as_column,
build_categorical_column,
deserialize_columns,
serialize_columns,
)
from cudf.core.column_accessor import ColumnAccessor
from cudf.core.mixins import BinaryOperand, Scannable
from cudf.core.window import Rolling
from cudf.utils import ioutils
from cudf.utils.docutils import copy_docstring
from cudf.utils.dtypes import can_convert_to_column, find_common_type
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.utils import _array_ufunc, _warn_no_dask_cudf
# TODO: It looks like Frame is missing a declaration of `copy`, need to add
class Frame(BinaryOperand, Scannable):
"""A collection of Column objects with an optional index.
Parameters
----------
data : dict
        A dict mapping column names to Columns
index : Table
A Frame representing the (optional) index columns.
"""
_data: "ColumnAccessor"
_VALID_BINARY_OPERATIONS = BinaryOperand._SUPPORTED_BINARY_OPERATIONS
def __init__(self, data=None):
if data is None:
data = {}
self._data = cudf.core.column_accessor.ColumnAccessor(data)
@property
def _num_columns(self) -> int:
return len(self._data)
@property
def _num_rows(self) -> int:
return 0 if self._num_columns == 0 else len(self._data.columns[0])
@property
def _column_names(self) -> Tuple[Any, ...]: # TODO: Tuple[str]?
return tuple(self._data.names)
@property
def _columns(self) -> Tuple[Any, ...]: # TODO: Tuple[Column]?
return tuple(self._data.columns)
@property
def _dtypes(self):
return dict(
zip(self._data.names, (col.dtype for col in self._data.columns))
)
@property
def _has_nulls(self):
return any(col.has_nulls() for col in self._data.values())
@_cudf_nvtx_annotate
def serialize(self):
header = {
"type-serialized": pickle.dumps(type(self)),
"column_names": pickle.dumps(tuple(self._data.names)),
}
header["columns"], frames = serialize_columns(self._columns)
return header, frames
@classmethod
@_cudf_nvtx_annotate
def deserialize(cls, header, frames):
cls_deserialize = pickle.loads(header["type-serialized"])
column_names = pickle.loads(header["column_names"])
columns = deserialize_columns(header["columns"], frames)
return cls_deserialize._from_data(dict(zip(column_names, columns)))
@classmethod
@_cudf_nvtx_annotate
def _from_data(cls, data: MutableMapping):
obj = cls.__new__(cls)
Frame.__init__(obj, data)
return obj
@_cudf_nvtx_annotate
def _from_data_like_self(self, data: MutableMapping):
return self._from_data(data)
@classmethod
@_cudf_nvtx_annotate
def _from_columns(
cls,
columns: List[ColumnBase],
column_names: abc.Iterable[str],
):
"""Construct a `Frame` object from a list of columns."""
data = {name: columns[i] for i, name in enumerate(column_names)}
return cls._from_data(data)
@_cudf_nvtx_annotate
def _from_columns_like_self(
self,
columns: List[ColumnBase],
column_names: Optional[abc.Iterable[str]] = None,
*,
override_dtypes: Optional[abc.Iterable[Optional[Dtype]]] = None,
):
"""Construct a Frame from a list of columns with metadata from self.
If `column_names` is None, use column names from self.
"""
if column_names is None:
column_names = self._column_names
frame = self.__class__._from_columns(columns, column_names)
return frame._copy_type_metadata(self, override_dtypes=override_dtypes)
@_cudf_nvtx_annotate
def _mimic_inplace(
self, result: Self, inplace: bool = False
) -> Optional[Self]:
if inplace:
for col in self._data:
if col in result._data:
self._data[col]._mimic_inplace(
result._data[col], inplace=True
)
self._data = result._data
return None
else:
return result
@property
@_cudf_nvtx_annotate
def size(self):
"""
Return the number of elements in the underlying data.
Returns
-------
size : Size of the DataFrame / Index / Series / MultiIndex
Examples
--------
Size of an empty dataframe is 0.
>>> import cudf
>>> df = cudf.DataFrame()
>>> df
Empty DataFrame
Columns: []
Index: []
>>> df.size
0
>>> df = cudf.DataFrame(index=[1, 2, 3])
>>> df
Empty DataFrame
Columns: []
Index: [1, 2, 3]
>>> df.size
0
DataFrame with values
>>> df = cudf.DataFrame({'a': [10, 11, 12],
... 'b': ['hello', 'rapids', 'ai']})
>>> df
a b
0 10 hello
1 11 rapids
2 12 ai
>>> df.size
6
>>> df.index
RangeIndex(start=0, stop=3)
>>> df.index.size
3
Size of an Index
>>> index = cudf.Index([])
>>> index
Float64Index([], dtype='float64')
>>> index.size
0
>>> index = cudf.Index([1, 2, 3, 10])
>>> index
Int64Index([1, 2, 3, 10], dtype='int64')
>>> index.size
4
Size of a MultiIndex
>>> midx = cudf.MultiIndex(
... levels=[["a", "b", "c", None], ["1", None, "5"]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> midx
MultiIndex([( 'a', '1'),
( 'a', '5'),
( 'b', <NA>),
( 'c', <NA>),
(<NA>, '1')],
names=['x', 'y'])
>>> midx.size
5
"""
return self._num_columns * self._num_rows
@_cudf_nvtx_annotate
def memory_usage(self, deep=False):
"""Return the memory usage of an object.
Parameters
----------
deep : bool
The deep parameter is ignored and is only included for pandas
compatibility.
Returns
-------
The total bytes used.
"""
raise NotImplementedError
@_cudf_nvtx_annotate
def __len__(self):
return self._num_rows
@_cudf_nvtx_annotate
def astype(self, dtype, copy=False, **kwargs):
result_data = {}
for col_name, col in self._data.items():
dt = dtype.get(col_name, col.dtype)
if not is_dtype_equal(dt, col.dtype):
result_data[col_name] = col.astype(dt, copy=copy, **kwargs)
else:
result_data[col_name] = col.copy() if copy else col
return ColumnAccessor._create_unsafe(
data=result_data,
multiindex=self._data.multiindex,
level_names=self._data.level_names,
)
@_cudf_nvtx_annotate
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two objects to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type.
Parameters
----------
other : Index, Series, DataFrame
The other object to be compared with.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
Examples
--------
>>> import cudf
Comparing Series with `equals`:
>>> s = cudf.Series([1, 2, 3])
>>> other = cudf.Series([1, 2, 3])
>>> s.equals(other)
True
>>> different = cudf.Series([1.5, 2, 3])
>>> s.equals(different)
False
Comparing DataFrames with `equals`:
>>> df = cudf.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
>>> exactly_equal = cudf.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
For two DataFrames to compare equal, the types of column
values must be equal, but the types of column labels
need not:
>>> different_column_type = cudf.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
"""
if self is other:
return True
if (
other is None
or not isinstance(other, type(self))
or len(self) != len(other)
):
return False
return all(
self_col.equals(other_col, check_dtypes=True)
for self_col, other_col in zip(
self._data.values(), other._data.values()
)
)
@_cudf_nvtx_annotate
def _get_columns_by_label(self, labels, *, downcast=False) -> Self:
"""
Returns columns of the Frame specified by `labels`
"""
return self.__class__._from_data(self._data.select_by_label(labels))
@property
@_cudf_nvtx_annotate
def values(self):
"""
Return a CuPy representation of the DataFrame.
Only the values in the DataFrame will be returned, the axes labels will
be removed.
Returns
-------
cupy.ndarray
The values of the DataFrame.
"""
return self.to_cupy()
@property
@_cudf_nvtx_annotate
def values_host(self):
"""
Return a NumPy representation of the data.
Only the values in the DataFrame will be returned, the axes labels will
be removed.
Returns
-------
numpy.ndarray
A host representation of the underlying data.
"""
return self.to_numpy()
@_cudf_nvtx_annotate
def __array__(self, dtype=None):
raise TypeError(
"Implicit conversion to a host NumPy array via __array__ is not "
"allowed, To explicitly construct a GPU matrix, consider using "
".to_cupy()\nTo explicitly construct a host matrix, consider "
"using .to_numpy()."
)
@_cudf_nvtx_annotate
def __arrow_array__(self, type=None):
raise TypeError(
"Implicit conversion to a host PyArrow object via __arrow_array__ "
"is not allowed. Consider using .to_arrow()"
)
@_cudf_nvtx_annotate
def _to_array(
self,
get_column_values: Callable,
make_empty_matrix: Callable,
dtype: Union[Dtype, None] = None,
na_value=None,
) -> Union[cupy.ndarray, np.ndarray]:
# Internal function to implement to_cupy and to_numpy, which are nearly
# identical except for the attribute they access to generate values.
def get_column_values_na(col):
if na_value is not None:
col = col.fillna(na_value)
return get_column_values(col)
# Early exit for an empty Frame.
ncol = self._num_columns
if ncol == 0:
return make_empty_matrix(
shape=(len(self), ncol), dtype=np.dtype("float64"), order="F"
)
if dtype is None:
dtypes = [col.dtype for col in self._data.values()]
for dtype in dtypes:
if isinstance(
dtype,
(
cudf.ListDtype,
cudf.core.dtypes.DecimalDtype,
cudf.StructDtype,
),
):
raise NotImplementedError(
f"{dtype} cannot be exposed as a cupy array"
)
dtype = find_common_type(dtypes)
matrix = make_empty_matrix(
shape=(len(self), ncol), dtype=dtype, order="F"
)
for i, col in enumerate(self._data.values()):
# TODO: col.values may fail if there is nullable data or an
# unsupported dtype. We may want to catch and provide a more
# suitable error.
matrix[:, i] = get_column_values_na(col)
return matrix
# TODO: As of now, calling cupy.asarray is _much_ faster than calling
# to_cupy. We should investigate the reasons why and whether we can provide
# a more efficient method here by exploiting __cuda_array_interface__. In
# particular, we need to benchmark how much of the overhead is coming from
# (potentially unavoidable) local copies in to_cupy and how much comes from
# inefficiencies in the implementation.
@_cudf_nvtx_annotate
def to_cupy(
self,
dtype: Union[Dtype, None] = None,
copy: bool = False,
na_value=None,
) -> cupy.ndarray:
"""Convert the Frame to a CuPy array.
Parameters
----------
dtype : str or :class:`numpy.dtype`, optional
The dtype to pass to :func:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
            ``to_cupy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, default None
The value to use for missing values. The default value depends on
dtype and the dtypes of the DataFrame columns.
Returns
-------
cupy.ndarray
"""
return self._to_array(
(lambda col: col.values.copy())
if copy
else (lambda col: col.values),
cupy.empty,
dtype,
na_value,
)
@_cudf_nvtx_annotate
def to_numpy(
self,
dtype: Union[Dtype, None] = None,
copy: bool = True,
na_value=None,
) -> numpy.ndarray:
"""Convert the Frame to a NumPy array.
Parameters
----------
dtype : str or :class:`numpy.dtype`, optional
The dtype to pass to :func:`numpy.asarray`.
copy : bool, default True
Whether to ensure that the returned value is not a view on
another array. This parameter must be ``True`` since cuDF must copy
device memory to host to provide a numpy array.
na_value : Any, default None
The value to use for missing values. The default value depends on
dtype and the dtypes of the DataFrame columns.
Returns
-------
numpy.ndarray
"""
if not copy:
raise ValueError(
"copy=False is not supported because conversion to a numpy "
"array always copies the data."
)
return self._to_array(
(lambda col: col.values_host), np.empty, dtype, na_value
)
@_cudf_nvtx_annotate
def where(self, cond, other=None, inplace=False):
"""
Replace values where the condition is False.
Parameters
----------
cond : bool Series/DataFrame, array-like
Where cond is True, keep the original value.
Where False, replace with corresponding value from other.
Callables are not supported.
other: scalar, list of scalars, Series/DataFrame
Entries where cond is False are replaced with
corresponding value from other. Callables are not
supported. Default is None.
DataFrame expects only Scalar or array like with scalars or
dataframe with same dimension as self.
Series expects only scalar or series like with same length
inplace : bool, default False
Whether to perform the operation in place on the data.
Returns
-------
Same type as caller
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"A":[1, 4, 5], "B":[3, 5, 8]})
>>> df.where(df % 2 == 0, [-1, -1])
A B
0 -1 -1
1 4 -1
2 -1 8
>>> ser = cudf.Series([4, 3, 2, 1, 0])
>>> ser.where(ser > 2, 10)
0 4
1 3
2 10
3 10
4 10
dtype: int64
>>> ser.where(ser > 2)
0 4
1 3
2 <NA>
3 <NA>
4 <NA>
dtype: int64
.. pandas-compat::
Note that ``where`` treats missing values as falsy,
in parallel with pandas treatment of nullable data:
>>> gsr = cudf.Series([1, 2, 3])
>>> gsr.where([True, False, cudf.NA])
0 1
1 <NA>
2 <NA>
dtype: int64
>>> gsr.where([True, False, False])
0 1
1 <NA>
2 <NA>
dtype: int64
"""
raise NotImplementedError
@_cudf_nvtx_annotate
def mask(self, cond, other=None, inplace=False):
"""
Replace values where the condition is True.
Parameters
----------
cond : bool Series/DataFrame, array-like
Where cond is False, keep the original value.
Where True, replace with corresponding value from other.
Callables are not supported.
other: scalar, list of scalars, Series/DataFrame
Entries where cond is True are replaced with
corresponding value from other. Callables are not
supported. Default is None.
DataFrame expects only Scalar or array like with scalars or
dataframe with same dimension as self.
Series expects only scalar or series like with same length
inplace : bool, default False
Whether to perform the operation in place on the data.
Returns
-------
Same type as caller
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"A":[1, 4, 5], "B":[3, 5, 8]})
>>> df.mask(df % 2 == 0, [-1, -1])
A B
0 1 3
1 -1 5
2 5 -1
>>> ser = cudf.Series([4, 3, 2, 1, 0])
>>> ser.mask(ser > 2, 10)
0 10
1 10
2 2
3 1
4 0
dtype: int64
>>> ser.mask(ser > 2)
0 <NA>
1 <NA>
2 2
3 1
4 0
dtype: int64
"""
if not hasattr(cond, "__invert__"):
# We Invert `cond` below and call `where`, so
# making sure the object supports
# `~`(inversion) operator or `__invert__` method
cond = cupy.asarray(cond)
return self.where(cond=~cond, other=other, inplace=inplace)
@_cudf_nvtx_annotate
def pipe(self, func, *args, **kwargs):
"""
Apply ``func(self, *args, **kwargs)``.
Parameters
----------
func : function
Function to apply to the Series/DataFrame/Index.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the Series/DataFrame/Index.
args : iterable, optional
Positional arguments passed into ``func``.
kwargs : mapping, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Examples
--------
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> func(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(func, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((func, 'arg2'), arg1=a, arg3=c)
... )
"""
return cudf.core.common.pipe(self, func, *args, **kwargs)
@_cudf_nvtx_annotate
def fillna(
self, value=None, method=None, axis=None, inplace=False, limit=None
):
"""Fill null values with ``value`` or specified ``method``.
Parameters
----------
value : scalar, Series-like or dict
Value to use to fill nulls. If Series-like, null values
are filled with values in corresponding indices.
A dict can be used to provide different values to fill nulls
in different columns. Cannot be used with ``method``.
method : {'ffill', 'bfill'}, default None
Method to use for filling null values in the dataframe or series.
`ffill` propagates the last non-null values forward to the next
non-null value. `bfill` propagates backward with the next non-null
value. Cannot be used with ``value``.
Returns
-------
result : DataFrame, Series, or Index
Copy with nulls filled.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, None], 'b': [3, None, 5]})
>>> df
a b
0 1 3
1 2 <NA>
2 <NA> 5
>>> df.fillna(4)
a b
0 1 3
1 2 4
2 4 5
>>> df.fillna({'a': 3, 'b': 4})
a b
0 1 3
1 2 4
2 3 5
``fillna`` on a Series object:
>>> ser = cudf.Series(['a', 'b', None, 'c'])
>>> ser
0 a
1 b
2 <NA>
3 c
dtype: object
>>> ser.fillna('z')
0 a
1 b
2 z
3 c
dtype: object
        ``fillna`` also supports in-place operation:
>>> ser.fillna('z', inplace=True)
>>> ser
0 a
1 b
2 z
3 c
dtype: object
>>> df.fillna({'a': 3, 'b': 4}, inplace=True)
>>> df
a b
0 1 3
1 2 4
2 3 5
``fillna`` specified with fill ``method``
>>> ser = cudf.Series([1, None, None, 2, 3, None, None])
>>> ser.fillna(method='ffill')
0 1
1 1
2 1
3 2
4 3
5 3
6 3
dtype: int64
>>> ser.fillna(method='bfill')
0 1
1 2
2 2
3 2
4 3
5 <NA>
6 <NA>
dtype: int64
"""
if limit is not None:
raise NotImplementedError("The limit keyword is not supported")
if axis:
raise NotImplementedError("The axis keyword is not supported")
if value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
if method:
if method not in {"ffill", "bfill", "pad", "backfill"}:
raise NotImplementedError(
f"Fill method {method} is not supported"
)
if method == "pad":
method = "ffill"
elif method == "backfill":
method = "bfill"
# TODO: This logic should be handled in different subclasses since
# different Frames support different types of values.
if isinstance(value, cudf.Series):
value = value.reindex(self._data.names)
elif isinstance(value, cudf.DataFrame):
if not self.index.equals(value.index):
value = value.reindex(self.index)
else:
value = value
elif not isinstance(value, abc.Mapping):
value = {name: copy.deepcopy(value) for name in self._data.names}
else:
value = {
key: value.reindex(self.index)
if isinstance(value, cudf.Series)
else value
for key, value in value.items()
}
filled_data = {}
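        # Fill column by column: a column is rewritten only when a fill method
        # is given, or when it has a non-null replacement value and actually
        # contains missing entries; otherwise it is copied as-is.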
for col_name, col in self._data.items():
if col_name in value and method is None:
replace_val = value[col_name]
else:
replace_val = None
should_fill = (
col_name in value
and col.contains_na_entries
and not libcudf.scalar._is_null_host_scalar(replace_val)
) or method is not None
if should_fill:
filled_data[col_name] = col.fillna(replace_val, method)
else:
filled_data[col_name] = col.copy(deep=True)
return self._mimic_inplace(
self._from_data(
data=ColumnAccessor._create_unsafe(
data=filled_data,
multiindex=self._data.multiindex,
level_names=self._data.level_names,
)
),
inplace=inplace,
)
@_cudf_nvtx_annotate
def _drop_column(self, name):
"""Drop a column by *name*"""
if name not in self._data:
raise KeyError(f"column '{name}' does not exist")
del self._data[name]
@_cudf_nvtx_annotate
def _drop_na_columns(self, how="any", subset=None, thresh=None):
"""
Drop columns containing nulls
"""
out_cols = []
if subset is None:
df = self
else:
df = self.take(subset)
if thresh is None:
if how == "all":
thresh = 1
else:
thresh = len(df)
for name, col in df._data.items():
try:
check_col = col.nans_to_nulls()
except AttributeError:
check_col = col
no_threshold_valid_count = (
len(col) - check_col.null_count
) < thresh
if no_threshold_valid_count:
continue
out_cols.append(name)
return self[out_cols]
@_cudf_nvtx_annotate
def _quantile_table(
self,
q,
interpolation="LINEAR",
is_sorted=False,
column_order=(),
null_precedence=(),
):
interpolation = libcudf.types.Interpolation[interpolation]
is_sorted = libcudf.types.Sorted["YES" if is_sorted else "NO"]
column_order = [libcudf.types.Order[key] for key in column_order]
null_precedence = [
libcudf.types.NullOrder[key] for key in null_precedence
]
return self._from_columns_like_self(
libcudf.quantiles.quantile_table(
[*self._columns],
q,
interpolation,
is_sorted,
column_order,
null_precedence,
),
column_names=self._column_names,
)
@classmethod
@_cudf_nvtx_annotate
def from_arrow(cls, data):
"""Convert from PyArrow Table to Frame
Parameters
----------
data : PyArrow Table
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> data = pa.table({"a":[1, 2, 3], "b":[4, 5, 6]})
>>> cudf.core.frame.Frame.from_arrow(data)
a b
0 1 4
1 2 5
2 3 6
"""
if not isinstance(data, (pa.Table)):
raise TypeError(
"To create a multicolumn cudf data, "
"the data should be an arrow Table"
)
column_names = data.column_names
pandas_dtypes = {}
np_dtypes = {}
if isinstance(data.schema.pandas_metadata, dict):
metadata = data.schema.pandas_metadata
pandas_dtypes = {
col["field_name"]: col["pandas_type"]
for col in metadata["columns"]
if "field_name" in col
}
np_dtypes = {
col["field_name"]: col["numpy_type"]
for col in metadata["columns"]
if "field_name" in col
}
# Currently we don't have support for
# pyarrow.DictionaryArray -> cudf Categorical column,
# so handling indices and dictionary as two different columns.
        # This needs to be removed once we have hooked libcudf dictionary32
# with categorical.
dict_indices = {}
dict_dictionaries = {}
dict_ordered = {}
for field in data.schema:
if isinstance(field.type, pa.DictionaryType):
dict_ordered[field.name] = field.type.ordered
dict_indices[field.name] = pa.chunked_array(
[chunk.indices for chunk in data[field.name].chunks],
type=field.type.index_type,
)
dict_dictionaries[field.name] = pa.chunked_array(
[chunk.dictionary for chunk in data[field.name].chunks],
type=field.type.value_type,
)
# Handle dict arrays
cudf_category_frame = {}
if len(dict_indices):
dict_indices_table = pa.table(dict_indices)
data = data.drop(dict_indices_table.column_names)
indices_columns = libcudf.interop.from_arrow(dict_indices_table)
# as dictionary size can vary, it can't be a single table
cudf_dictionaries_columns = {
name: ColumnBase.from_arrow(dict_dictionaries[name])
for name in dict_dictionaries.keys()
}
cudf_category_frame = {
name: build_categorical_column(
cudf_dictionaries_columns[name],
codes,
mask=codes.base_mask,
size=codes.size,
ordered=dict_ordered[name],
)
for name, codes in zip(
dict_indices_table.column_names, indices_columns
)
}
# Handle non-dict arrays
cudf_non_category_frame = {
name: col
for name, col in zip(
data.column_names, libcudf.interop.from_arrow(data)
)
}
result = {**cudf_non_category_frame, **cudf_category_frame}
# There are some special cases that need to be handled
# based on metadata.
for name in result:
if (
len(result[name]) == 0
and pandas_dtypes.get(name) == "categorical"
):
# When pandas_dtype is a categorical column and the size
# of column is 0 (i.e., empty) then we will have an
# int8 column in result._data[name] returned by libcudf,
# which needs to be type-casted to 'category' dtype.
result[name] = result[name].as_categorical_column("category")
elif (
pandas_dtypes.get(name) == "empty"
and np_dtypes.get(name) == "object"
):
                # When a string column has all null values, pandas_dtype is
                # specified as 'empty' and np_dtypes as 'object',
# hence handling this special case to type-cast the empty
# float column to str column.
result[name] = result[name].as_string_column(cudf.dtype("str"))
elif name in data.column_names and isinstance(
data[name].type,
(
pa.StructType,
pa.ListType,
pa.Decimal128Type,
pa.TimestampType,
),
):
# In case of struct column, libcudf is not aware of names of
# struct fields, hence renaming the struct fields is
# necessary by extracting the field names from arrow
# struct types.
# In case of decimal column, libcudf is not aware of the
# decimal precision.
# In case of list column, there is a possibility of nested
# list columns to have struct or decimal columns inside them.
# Datetimes ("timestamps") may need timezone metadata
# attached to them, as libcudf is timezone-unaware
# All of these cases are handled by calling the
# _with_type_metadata method on the column.
result[name] = result[name]._with_type_metadata(
cudf.utils.dtypes.cudf_dtype_from_pa_type(data[name].type)
)
return cls._from_data({name: result[name] for name in column_names})
@_cudf_nvtx_annotate
def to_arrow(self):
"""
Convert to arrow Table
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame(
... {"a":[1, 2, 3], "b":[4, 5, 6]}, index=[1, 2, 3])
>>> df.to_arrow()
pyarrow.Table
a: int64
b: int64
index: int64
----
a: [[1,2,3]]
b: [[4,5,6]]
index: [[1,2,3]]
"""
return pa.Table.from_pydict(
{str(name): col.to_arrow() for name, col in self._data.items()}
)
@_cudf_nvtx_annotate
def _positions_from_column_names(self, column_names):
"""Map each column name into their positions in the frame.
The order of indices returned corresponds to the column order in this
Frame.
"""
return [
i
for i, name in enumerate(self._column_names)
if name in set(column_names)
]
@_cudf_nvtx_annotate
def _copy_type_metadata(
self,
other: Self,
*,
override_dtypes: Optional[abc.Iterable[Optional[Dtype]]] = None,
) -> Self:
"""
Copy type metadata from each column of `other` to the corresponding
column of `self`.
If override_dtypes is provided, any non-None entry
will be used in preference to the relevant column of other to
provide the new dtype.
See `ColumnBase._with_type_metadata` for more information.
"""
if override_dtypes is None:
override_dtypes = itertools.repeat(None)
dtypes = (
dtype if dtype is not None else col.dtype
for (dtype, col) in zip(override_dtypes, other._data.values())
)
for (name, col), dtype in zip(self._data.items(), dtypes):
self._data.set_by_label(
name, col._with_type_metadata(dtype), validate=False
)
return self
@_cudf_nvtx_annotate
def isna(self):
"""
Identify missing values.
Return a boolean same-sized object indicating if
        the values are ``<NA>``. ``<NA>`` values get mapped to
``True`` values. Everything else gets mapped to
``False`` values. ``<NA>`` values include:
* Values where null mask is set.
* ``NaN`` in float dtype.
* ``NaT`` in datetime64 and timedelta64 types.
Characters such as empty strings ``''`` or
``inf`` in case of float are not
considered ``<NA>`` values.
Returns
-------
DataFrame/Series/Index
Mask of bool values for each element in
the object that indicates whether an element is an NA value.
Examples
--------
Show which entries in a DataFrame are NA.
>>> import cudf
>>> import numpy as np
>>> import pandas as pd
>>> df = cudf.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5 <NA> Alfred <NA>
1 6 1939-05-27 00:00:00.000000 Batman Batmobile
2 <NA> 1940-04-25 00:00:00.000000 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = cudf.Series([5, 6, np.NaN, np.inf, -np.inf])
>>> ser
0 5.0
1 6.0
2 <NA>
3 Inf
4 -Inf
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
3 False
4 False
dtype: bool
Show which entries in an Index are NA.
>>> idx = cudf.Index([1, 2, None, np.NaN, 0.32, np.inf])
>>> idx
Float64Index([1.0, 2.0, <NA>, <NA>, 0.32, Inf], dtype='float64')
>>> idx.isna()
array([False, False, True, True, False, False])
"""
data_columns = (col.isnull() for col in self._columns)
return self._from_data_like_self(zip(self._column_names, data_columns))
# Alias for isna
isnull = isna
@_cudf_nvtx_annotate
def notna(self):
"""
Identify non-missing values.
Return a boolean same-sized object indicating if
the values are not ``<NA>``. Non-missing values get
mapped to ``True``. ``<NA>`` values get mapped to
``False`` values. ``<NA>`` values include:
* Values where null mask is set.
* ``NaN`` in float dtype.
* ``NaT`` in datetime64 and timedelta64 types.
Characters such as empty strings ``''`` or
``inf`` in case of float are not
considered ``<NA>`` values.
Returns
-------
DataFrame/Series/Index
Mask of bool values for each element in
the object that indicates whether an element is not an NA value.
Examples
--------
Show which entries in a DataFrame are NA.
>>> import cudf
>>> import numpy as np
>>> import pandas as pd
>>> df = cudf.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5 <NA> Alfred <NA>
1 6 1939-05-27 00:00:00.000000 Batman Batmobile
2 <NA> 1940-04-25 00:00:00.000000 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are NA.
>>> ser = cudf.Series([5, 6, np.NaN, np.inf, -np.inf])
>>> ser
0 5.0
1 6.0
2 <NA>
3 Inf
4 -Inf
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
3 True
4 True
dtype: bool
Show which entries in an Index are NA.
>>> idx = cudf.Index([1, 2, None, np.NaN, 0.32, np.inf])
>>> idx
Float64Index([1.0, 2.0, <NA>, <NA>, 0.32, Inf], dtype='float64')
>>> idx.notna()
array([ True, True, False, False, True, True])
"""
data_columns = (col.notnull() for col in self._columns)
return self._from_data_like_self(zip(self._column_names, data_columns))
# Alias for notna
notnull = notna
@_cudf_nvtx_annotate
def searchsorted(
self, values, side="left", ascending=True, na_position="last"
):
"""Find indices where elements should be inserted to maintain order
Parameters
----------
        values : Frame (Shape must be consistent with self)
Values to be hypothetically inserted into Self
side : str {'left', 'right'} optional, default 'left'
If 'left', the index of the first suitable location found is given
If 'right', return the last such index
ascending : bool optional, default True
Sorted Frame is in ascending order (otherwise descending)
na_position : str {'last', 'first'} optional, default 'last'
Position of null values in sorted order
Returns
-------
1-D cupy array of insertion points
Examples
--------
>>> s = cudf.Series([1, 2, 3])
>>> s.searchsorted(4)
3
>>> s.searchsorted([0, 4])
array([0, 3], dtype=int32)
>>> s.searchsorted([1, 3], side='left')
array([0, 2], dtype=int32)
>>> s.searchsorted([1, 3], side='right')
array([1, 3], dtype=int32)
If the values are not monotonically sorted, wrong
locations may be returned:
>>> s = cudf.Series([2, 1, 3])
>>> s.searchsorted(1)
0 # wrong result, correct would be 1
>>> df = cudf.DataFrame({'a': [1, 3, 5, 7], 'b': [10, 12, 14, 16]})
>>> df
a b
0 1 10
1 3 12
2 5 14
3 7 16
>>> values_df = cudf.DataFrame({'a': [0, 2, 5, 6],
... 'b': [10, 11, 13, 15]})
>>> values_df
a b
0 0 10
        1 2 11
2 5 13
3 6 15
>>> df.searchsorted(values_df, ascending=False)
array([4, 4, 4, 0], dtype=int32)
"""
# Call libcudf search_sorted primitive
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
scalar_flag = None
if is_scalar(values):
scalar_flag = True
if not isinstance(values, Frame):
values = [as_column(values)]
else:
values = [*values._columns]
if len(values) != len(self._data):
raise ValueError("Mismatch number of columns to search for.")
# TODO: Change behavior based on the decision in
# https://github.com/pandas-dev/pandas/issues/54668
common_dtype_list = [
find_common_type([col.dtype, val.dtype])
for col, val in zip(self._columns, values)
]
sources = [
col
if is_dtype_equal(col.dtype, common_dtype)
else col.astype(common_dtype)
for col, common_dtype in zip(self._columns, common_dtype_list)
]
values = [
val
if is_dtype_equal(val.dtype, common_dtype)
else val.astype(common_dtype)
for val, common_dtype in zip(values, common_dtype_list)
]
outcol = libcudf.search.search_sorted(
sources,
values,
side,
ascending=ascending,
na_position=na_position,
)
# Return result as cupy array if the values is non-scalar
# If values is scalar, result is expected to be scalar.
result = cupy.asarray(outcol.data_array_view(mode="read"))
if scalar_flag:
return result[0].item()
else:
return result
@_cudf_nvtx_annotate
def argsort(
self,
by=None,
axis=0,
kind="quicksort",
order=None,
ascending=True,
na_position="last",
):
"""Return the integer indices that would sort the Series values.
Parameters
----------
by : str or list of str, default None
Name or list of names to sort by. If None, sort by all columns.
axis : {0 or "index"}
Has no effect but is accepted for compatibility with numpy.
kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See :func:`numpy.sort` for more
information. 'mergesort' and 'stable' are the only stable
algorithms. Only quicksort is supported in cuDF.
order : None
Has no effect but is accepted for compatibility with numpy.
ascending : bool or list of bool, default True
If True, sort values in ascending order, otherwise descending.
na_position : {'first' or 'last'}, default 'last'
Argument 'first' puts NaNs at the beginning, 'last' puts NaNs
at the end.
Returns
-------
cupy.ndarray: The indices sorted based on input.
Examples
--------
**Series**
>>> import cudf
>>> s = cudf.Series([3, 1, 2])
>>> s
0 3
1 1
2 2
dtype: int64
>>> s.argsort()
0 1
1 2
2 0
dtype: int32
>>> s[s.argsort()]
1 1
2 2
0 3
dtype: int64
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame({'foo': [3, 1, 2]})
>>> df.argsort()
array([1, 2, 0], dtype=int32)
**Index**
>>> import cudf
>>> idx = cudf.Index([3, 1, 2])
>>> idx.argsort()
array([1, 2, 0], dtype=int32)
""" # noqa: E501
if na_position not in {"first", "last"}:
raise ValueError(f"invalid na_position: {na_position}")
if kind != "quicksort":
if kind not in {"mergesort", "heapsort", "stable"}:
raise AttributeError(
f"{kind} is not a valid sorting algorithm for "
f"'DataFrame' object"
)
warnings.warn(
f"GPU-accelerated {kind} is currently not supported, "
"defaulting to quicksort."
)
if isinstance(by, str):
by = [by]
return self._get_sorted_inds(
by=by, ascending=ascending, na_position=na_position
).values
@_cudf_nvtx_annotate
def _get_sorted_inds(self, by=None, ascending=True, na_position="last"):
"""
Get the indices required to sort self according to the columns
specified in by.
"""
to_sort = [
*(
self
if by is None
else self._get_columns_by_label(list(by), downcast=False)
)._columns
]
        # If given a scalar, build a sequence with one entry per sort column
if np.isscalar(ascending):
ascending = [ascending] * len(to_sort)
return libcudf.sort.order_by(
to_sort,
ascending,
na_position,
stable=True,
)
@_cudf_nvtx_annotate
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
DataFrame/Series
Absolute value of each element.
Examples
--------
Absolute numeric values in a Series
>>> s = cudf.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
"""
return self._unaryop("abs")
@_cudf_nvtx_annotate
def _is_sorted(self, ascending=None, null_position=None):
"""
Returns a boolean indicating whether the data of the Frame are sorted
based on the parameters given. Does not account for the index.
Parameters
----------
self : Frame
Frame whose columns are to be checked for sort order
ascending : None or list-like of booleans
None or list-like of boolean values indicating expected sort order
of each column. If list-like, size of list-like must be
            len(columns). If None, the expected sort order of all columns is
            set to ascending. False (0) - ascending, True (1) - descending.
null_position : None or list-like of booleans
None or list-like of boolean values indicating desired order of
nulls compared to other elements. If list-like, size of list-like
must be len(columns). If None, null order is set to before. False
(0) - before, True (1) - after.
Returns
-------
returns : boolean
Returns True, if sorted as expected by ``ascending`` and
``null_position``, False otherwise.
"""
if ascending is not None and not cudf.api.types.is_list_like(
ascending
):
raise TypeError(
f"Expected a list-like or None for `ascending`, got "
f"{type(ascending)}"
)
if null_position is not None and not cudf.api.types.is_list_like(
null_position
):
raise TypeError(
f"Expected a list-like or None for `null_position`, got "
f"{type(null_position)}"
)
return libcudf.sort.is_sorted(
[*self._columns], ascending=ascending, null_position=null_position
)
@_cudf_nvtx_annotate
def _split(self, splits):
"""Split a frame with split points in ``splits``. Returns a list of
Frames of length `len(splits) + 1`.
"""
return [
self._from_columns_like_self(
libcudf.copying.columns_split([*self._data.columns], splits)[
split_idx
],
self._column_names,
)
for split_idx in range(len(splits) + 1)
]
@_cudf_nvtx_annotate
def _encode(self):
columns, indices = libcudf.transform.table_encode([*self._columns])
keys = self._from_columns_like_self(columns)
return keys, indices
@_cudf_nvtx_annotate
def _unaryop(self, op):
data_columns = (col.unary_operator(op) for col in self._columns)
return self._from_data_like_self(zip(self._column_names, data_columns))
@classmethod
@_cudf_nvtx_annotate
def _colwise_binop(
cls,
operands: Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]],
fn: str,
):
"""Implement binary ops between two frame-like objects.
Binary operations for Frames can be reduced to a sequence of binary
operations between column-like objects. Different types of frames need
to preprocess different inputs, so subclasses should implement binary
operations as a preprocessing step that calls this method.
Parameters
----------
operands : Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]]
A mapping from column names to a tuple containing left and right
operands as well as a boolean indicating whether or not to reflect
an operation and fill value for nulls.
fn : str
The operation to perform.
Returns
-------
Dict[ColumnBase]
A dict of columns constructed from the result of performing the
requested operation on the operands.
"""
# Now actually perform the binop on the columns in left and right.
output = {}
for (
col,
(left_column, right_column, reflect, fill_value),
) in operands.items():
output_mask = None
if fill_value is not None:
left_is_column = isinstance(left_column, ColumnBase)
right_is_column = isinstance(right_column, ColumnBase)
if left_is_column and right_is_column:
# If both columns are nullable, pandas semantics dictate
# that nulls that are present in both left_column and
# right_column are not filled.
if left_column.nullable and right_column.nullable:
with acquire_spill_lock():
lmask = as_column(left_column.nullmask)
rmask = as_column(right_column.nullmask)
output_mask = (lmask | rmask).data
left_column = left_column.fillna(fill_value)
right_column = right_column.fillna(fill_value)
elif left_column.nullable:
left_column = left_column.fillna(fill_value)
elif right_column.nullable:
right_column = right_column.fillna(fill_value)
elif left_is_column:
if left_column.nullable:
left_column = left_column.fillna(fill_value)
elif right_is_column:
if right_column.nullable:
right_column = right_column.fillna(fill_value)
else:
assert False, "At least one operand must be a column."
# TODO: Disable logical and binary operators between columns that
# are not numerical using the new binops mixin.
outcol = (
getattr(operator, fn)(right_column, left_column)
if reflect
else getattr(operator, fn)(left_column, right_column)
)
if output_mask is not None:
outcol = outcol.set_mask(output_mask)
output[col] = outcol
return output
@_cudf_nvtx_annotate
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return _array_ufunc(self, ufunc, method, inputs, kwargs)
@_cudf_nvtx_annotate
@acquire_spill_lock()
def _apply_cupy_ufunc_to_operands(
self, ufunc, cupy_func, operands, **kwargs
):
# Note: There are some operations that may be supported by libcudf but
# are not supported by pandas APIs. In particular, libcudf binary
# operations support logical and/or operations as well as
# trigonometric, but those operations are not defined on
# pd.Series/DataFrame. For now those operations will dispatch to cupy,
# but if ufuncs are ever a bottleneck we could add special handling to
# dispatch those (or any other) functions that we could implement
# without cupy.
mask = None
data = [{} for _ in range(ufunc.nout)]
for name, (left, right, _, _) in operands.items():
cupy_inputs = []
for inp in (left, right) if ufunc.nin == 2 else (left,):
if isinstance(inp, ColumnBase) and inp.has_nulls():
new_mask = as_column(inp.nullmask)
# TODO: This is a hackish way to perform a bitwise and
# of bitmasks. Once we expose
# cudf::detail::bitwise_and, then we can use that
# instead.
mask = new_mask if mask is None else (mask & new_mask)
# Arbitrarily fill with zeros. For ufuncs, we assume
# that the end result propagates nulls via a bitwise
# and, so these elements are irrelevant.
inp = inp.fillna(0)
cupy_inputs.append(cupy.asarray(inp))
cp_output = cupy_func(*cupy_inputs, **kwargs)
if ufunc.nout == 1:
cp_output = (cp_output,)
for i, out in enumerate(cp_output):
data[i][name] = as_column(out).set_mask(mask)
return data
@_cudf_nvtx_annotate
def dot(self, other, reflect=False):
"""
Get dot product of frame and other, (binary operator `dot`).
Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`,
`dot`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`,
`@`.
Parameters
----------
other : Sequence, Series, or DataFrame
Any multiple element data structure, or list-like object.
reflect : bool, default False
If ``True``, swap the order of the operands. See
https://docs.python.org/3/reference/datamodel.html#object.__ror__
for more information on when this is necessary.
Returns
-------
scalar, Series, or DataFrame
The result of the operation.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame([[1, 2, 3, 4],
... [5, 6, 7, 8]])
>>> df @ df.T
0 1
0 30 70
1 70 174
>>> s = cudf.Series([1, 1, 1, 1])
>>> df @ s
0 10
1 26
dtype: int64
>>> [1, 2, 3, 4] @ s
10
"""
# TODO: This function does not currently support nulls.
lhs = self.values
result_index = None
result_cols = None
if isinstance(self, cudf.Series) and isinstance(
other, (cudf.Series, cudf.DataFrame)
):
common = self.index.union(other.index)
if len(common) > len(self.index) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
lhs = self.reindex(index=common, copy=False).values
rhs = other.reindex(index=common, copy=False).values
if isinstance(other, cudf.DataFrame):
result_index = other._data.to_pandas_index()
elif isinstance(self, cudf.DataFrame) and isinstance(
other, (cudf.Series, cudf.DataFrame)
):
common = self._data.to_pandas_index().union(
other.index.to_pandas()
)
if len(common) > len(self._data.names) or len(common) > len(
other.index
):
raise ValueError("matrices are not aligned")
lhs = self.reindex(columns=common, copy=False)
result_index = lhs.index
rhs = other.reindex(index=common, copy=False).values
lhs = lhs.values
if isinstance(other, cudf.DataFrame):
result_cols = other._data.to_pandas_index()
elif isinstance(
other, (cupy.ndarray, np.ndarray)
) or can_convert_to_column(other):
rhs = cupy.asarray(other)
else:
# TODO: This should raise an exception, not return NotImplemented,
# but __matmul__ relies on the current behavior. We should either
# move this implementation to __matmul__ and call it from here
# (checking for NotImplemented and raising NotImplementedError if
# that's what's returned), or __matmul__ should catch a
# NotImplementedError from here and return NotImplemented. The
# latter feels cleaner (putting the implementation in this method
# rather than in the operator) but will be slower in the (highly
# unlikely) case that we're multiplying a cudf object with another
# type of object that somehow supports this behavior.
return NotImplemented
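        # For reflected operations (e.g. ``other @ self``), swap the operands
        # before computing the product.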
if reflect:
lhs, rhs = rhs, lhs
result = lhs.dot(rhs)
if len(result.shape) == 1:
return cudf.Series(
result,
index=self.index if result_index is None else result_index,
)
if len(result.shape) == 2:
return cudf.DataFrame(
result,
index=self.index if result_index is None else result_index,
columns=result_cols,
)
return result.item()
@_cudf_nvtx_annotate
def __matmul__(self, other):
return self.dot(other)
@_cudf_nvtx_annotate
def __rmatmul__(self, other):
return self.dot(other, reflect=True)
# Unary logical operators
@_cudf_nvtx_annotate
def __neg__(self):
"""Negate for integral dtypes, logical NOT for bools."""
return self._from_data_like_self(
{
name: col.unary_operator("not")
if is_bool_dtype(col.dtype)
else -1 * col
for name, col in self._data.items()
}
)
@_cudf_nvtx_annotate
def __pos__(self):
return self.copy(deep=True)
@_cudf_nvtx_annotate
def __abs__(self):
return self._unaryop("abs")
# Reductions
@classmethod
@_cudf_nvtx_annotate
def _get_axis_from_axis_arg(cls, axis):
try:
return cls._SUPPORT_AXIS_LOOKUP[axis]
except KeyError:
raise ValueError(f"No axis named {axis} for object type {cls}")
@_cudf_nvtx_annotate
def _reduce(self, *args, **kwargs):
raise NotImplementedError(
f"Reductions are not supported for objects of type {type(self)}."
)
@_cudf_nvtx_annotate
def min(
self,
axis=no_default,
skipna=True,
level=None,
numeric_only=None,
**kwargs,
):
"""
Return the minimum of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
level: int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a Series.
numeric_only: bool, default None
Include only float, int, boolean columns. If None, will attempt to
use everything, then use only numeric data.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.min()
a 1
b 7
dtype: int64
"""
return self._reduce(
"min",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
@_cudf_nvtx_annotate
def max(
self,
axis=no_default,
skipna=True,
level=None,
numeric_only=None,
**kwargs,
):
"""
Return the maximum of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
level: int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a Series.
numeric_only: bool, default None
Include only float, int, boolean columns. If None, will attempt to
use everything, then use only numeric data.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.max()
a 4
b 10
dtype: int64
"""
return self._reduce(
"max",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
@_cudf_nvtx_annotate
def sum(
self,
axis=no_default,
skipna=True,
dtype=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
"""
Return sum of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
dtype: data type
Data type to cast the result to.
min_count: int, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result
will be NA.
The default being 0. This means the sum of an all-NA or empty
Series is 0, and the product of an all-NA or empty Series is 1.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.sum()
a 10
b 34
dtype: int64
"""
return self._reduce(
"sum",
axis=axis,
skipna=skipna,
dtype=dtype,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
@_cudf_nvtx_annotate
def product(
self,
axis=no_default,
skipna=True,
dtype=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs,
):
"""
Return product of the values in the DataFrame.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
dtype: data type
Data type to cast the result to.
min_count: int, default 0
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result
will be NA.
The default being 0. This means the sum of an all-NA or empty
Series is 0, and the product of an all-NA or empty Series is 1.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.product()
a 24
b 5040
dtype: int64
"""
return self._reduce(
# cuDF columns use "product" as the op name, but cupy uses "prod"
# and we need cupy if axis == 1.
"prod" if axis in {1, "columns"} else "product",
axis=axis,
skipna=skipna,
dtype=dtype,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs,
)
# Alias for pandas compatibility.
prod = product
@_cudf_nvtx_annotate
def mean(
self,
axis=no_default,
skipna=True,
level=None,
numeric_only=None,
**kwargs,
):
"""
Return the mean of the values for the requested axis.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a Series.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to
use everything, then use only numeric data. Not implemented for
Series.
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
mean : Series or DataFrame (if level specified)
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.mean()
a 2.5
b 8.5
dtype: float64
"""
return self._reduce(
"mean",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
@_cudf_nvtx_annotate
def std(
self,
axis=no_default,
skipna=True,
level=None,
ddof=1,
numeric_only=None,
**kwargs,
):
"""
Return sample standard deviation of the DataFrame.
Normalized by N-1 by default. This can be changed using
the `ddof` argument
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
ddof: int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is N - ddof, where N represents the number of elements.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level` and
`numeric_only`
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.std()
a 1.290994
b 1.290994
dtype: float64
"""
return self._reduce(
"std",
axis=axis,
skipna=skipna,
level=level,
ddof=ddof,
numeric_only=numeric_only,
**kwargs,
)
@_cudf_nvtx_annotate
def var(
self,
axis=no_default,
skipna=True,
level=None,
ddof=1,
numeric_only=None,
**kwargs,
):
"""
Return unbiased variance of the DataFrame.
Normalized by N-1 by default. This can be changed using the
ddof argument.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
ddof: int, default 1
Delta Degrees of Freedom. The divisor used in calculations is
N - ddof, where N represents the number of elements.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `level` and
`numeric_only`
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.var()
a 1.666667
b 1.666667
dtype: float64
"""
return self._reduce(
"var",
axis=axis,
skipna=skipna,
level=level,
ddof=ddof,
numeric_only=numeric_only,
**kwargs,
)
@_cudf_nvtx_annotate
def kurtosis(
self,
axis=no_default,
skipna=True,
level=None,
numeric_only=None,
**kwargs,
):
"""
Return Fisher's unbiased kurtosis of a sample.
Kurtosis obtained using Fisher's definition of
kurtosis (kurtosis of normal == 0.0). Normalized by N-1.
Parameters
----------
axis: {index (0), columns(1)}
Axis for the function to be applied on.
skipna: bool, default True
Exclude NA/null values when computing the result.
Returns
-------
Series or scalar
Notes
-----
Parameters currently not supported are `level` and `numeric_only`
Examples
--------
**Series**
>>> import cudf
>>> series = cudf.Series([1, 2, 3, 4])
>>> series.kurtosis()
-1.1999999999999904
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 4], 'b': [7, 8, 9, 10]})
>>> df.kurt()
a -1.2
b -1.2
dtype: float64
"""
if axis not in (0, "index", None, no_default):
raise NotImplementedError("Only axis=0 is currently supported.")
return self._reduce(
"kurtosis",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
# Alias for kurtosis.
kurt = kurtosis
@_cudf_nvtx_annotate
def skew(
self,
axis=no_default,
skipna=True,
level=None,
numeric_only=None,
**kwargs,
):
"""
Return unbiased Fisher-Pearson skew of a sample.
Parameters
----------
skipna: bool, default True
Exclude NA/null values when computing the result.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `axis`, `level` and
`numeric_only`
Examples
--------
**Series**
>>> import cudf
>>> series = cudf.Series([1, 2, 3, 4, 5, 6, 6])
>>> series
0 1
1 2
2 3
3 4
4 5
5 6
6 6
dtype: int64
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame({'a': [3, 2, 3, 4], 'b': [7, 8, 10, 10]})
>>> df.skew()
a 0.00000
b -0.37037
dtype: float64
"""
if axis not in (0, "index", None, no_default):
raise NotImplementedError("Only axis=0 is currently supported.")
return self._reduce(
"skew",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
@_cudf_nvtx_annotate
def all(self, axis=0, skipna=True, level=None, **kwargs):
"""
Return whether all elements are True in DataFrame.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced. For `Series`
this parameter is unused and defaults to `0`.
- 0 or 'index' : reduce the index, return a Series
whose index is the original column labels.
- 1 or 'columns' : reduce the columns, return a Series
whose index is the original index.
- None : reduce all axes, return a scalar.
skipna: bool, default True
Exclude NA/null values. If the entire row/column is NA and
skipna is True, then the result will be True, as for an
empty row/column.
If skipna is False, then NA values are treated as True, because
they are not equal to zero.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `bool_only`, `level`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [3, 2, 3, 4], 'b': [7, 0, 10, 10]})
>>> df.all()
a True
b False
dtype: bool
"""
return self._reduce(
"all",
axis=axis,
skipna=skipna,
level=level,
**kwargs,
)
@_cudf_nvtx_annotate
def any(self, axis=0, skipna=True, level=None, **kwargs):
"""
Return whether any element is True in DataFrame.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced. For `Series`
this parameter is unused and defaults to `0`.
- 0 or 'index' : reduce the index, return a Series
whose index is the original column labels.
- 1 or 'columns' : reduce the columns, return a Series
whose index is the original index.
- None : reduce all axes, return a scalar.
skipna: bool, default True
Exclude NA/null values. If the entire row/column is NA and
skipna is True, then the result will be False, as for an
empty row/column.
If skipna is False, then NA values are treated as True, because
they are not equal to zero.
Returns
-------
Series
Notes
-----
Parameters currently not supported are `bool_only`, `level`.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [3, 2, 3, 4], 'b': [7, 0, 10, 10]})
>>> df.any()
a True
b True
dtype: bool
"""
return self._reduce(
"any",
axis=axis,
skipna=skipna,
level=level,
**kwargs,
)
@_cudf_nvtx_annotate
def median(
self, axis=None, skipna=True, level=None, numeric_only=None, **kwargs
):
"""
Return the median of the values for the requested axis.
Parameters
----------
skipna : bool, default True
Exclude NA/null values when computing the result.
Returns
-------
scalar
Notes
-----
Parameters currently not supported are `level` and `numeric_only`.
Examples
--------
>>> import cudf
>>> ser = cudf.Series([10, 25, 3, 25, 24, 6])
>>> ser
0 10
1 25
2 3
3 25
4 24
5 6
dtype: int64
>>> ser.median()
17.0
"""
return self._reduce(
"median",
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
**kwargs,
)
@_cudf_nvtx_annotate
@ioutils.doc_to_json()
def to_json(self, path_or_buf=None, *args, **kwargs):
"""{docstring}"""
return cudf.io.json.to_json(
self, path_or_buf=path_or_buf, *args, **kwargs
)
@_cudf_nvtx_annotate
@ioutils.doc_to_hdf()
def to_hdf(self, path_or_buf, key, *args, **kwargs):
"""{docstring}"""
cudf.io.hdf.to_hdf(path_or_buf, key, self, *args, **kwargs)
@_cudf_nvtx_annotate
@ioutils.doc_to_dlpack()
def to_dlpack(self):
"""{docstring}"""
return cudf.io.dlpack.to_dlpack(self)
@_cudf_nvtx_annotate
def to_string(self):
r"""
Convert to string
cuDF uses Pandas internals for efficient string formatting.
Set formatting options using pandas string formatting options;
cuDF objects will then print identically to Pandas objects.
cuDF supports `null/None` as a value in any column type, and
such values are handled transparently during this output process.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2]
>>> df['val'] = [float(i + 10) for i in range(3)]
>>> df.to_string()
' key val\n0 0 10.0\n1 1 11.0\n2 2 12.0'
"""
return repr(self)
@_cudf_nvtx_annotate
def __str__(self):
return self.to_string()
@_cudf_nvtx_annotate
def __deepcopy__(self, memo):
return self.copy(deep=True)
@_cudf_nvtx_annotate
def __copy__(self):
return self.copy(deep=False)
@_cudf_nvtx_annotate
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
For negative values of `n`, this function returns all rows except
the last `n` rows, equivalent to ``df[:-n]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
DataFrame or Series
The first `n` rows of the caller object.
Examples
--------
**Series**
>>> ser = cudf.Series(['alligator', 'bee', 'falcon',
... 'lion', 'monkey', 'parrot', 'shark', 'whale', 'zebra'])
>>> ser
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
dtype: object
Viewing the first 5 lines
>>> ser.head()
0 alligator
1 bee
2 falcon
3 lion
4 monkey
dtype: object
Viewing the first `n` lines (three in this case)
>>> ser.head(3)
0 alligator
1 bee
2 falcon
dtype: object
For negative values of `n`
>>> ser.head(-3)
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
dtype: object
**DataFrame**
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)] # insert column
>>> df.head(2)
key val
0 0 10.0
1 1 11.0
"""
return self.iloc[:n]
@_cudf_nvtx_annotate
def tail(self, n=5):
"""
Returns the last n rows as a new DataFrame or Series
Examples
--------
**DataFrame**
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)] # insert column
>>> df.tail(2)
key val
3 3 13.0
4 4 14.0
**Series**
>>> import cudf
>>> ser = cudf.Series([4, 3, 2, 1, 0])
>>> ser.tail(2)
3 1
4    0
dtype: int64
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
@_cudf_nvtx_annotate
@copy_docstring(Rolling)
def rolling(
self, window, min_periods=None, center=False, axis=0, win_type=None
):
return Rolling(
self,
window,
min_periods=min_periods,
center=center,
axis=axis,
win_type=win_type,
)
@_cudf_nvtx_annotate
def nans_to_nulls(self):
"""
Convert nans (if any) to nulls
Returns
-------
DataFrame or Series
Examples
--------
**Series**
>>> import cudf, numpy as np
>>> series = cudf.Series([1, 2, np.nan, None, 10], nan_as_null=False)
>>> series
0 1.0
1 2.0
2 NaN
3 <NA>
4 10.0
dtype: float64
>>> series.nans_to_nulls()
0 1.0
1 2.0
2 <NA>
3 <NA>
4 10.0
dtype: float64
**DataFrame**
>>> df = cudf.DataFrame()
>>> df['a'] = cudf.Series([1, None, np.nan], nan_as_null=False)
>>> df['b'] = cudf.Series([None, 3.14, np.nan], nan_as_null=False)
>>> df
a b
0 1.0 <NA>
1 <NA> 3.14
2 NaN NaN
>>> df.nans_to_nulls()
a b
0 1.0 <NA>
1 <NA> 3.14
2 <NA> <NA>
"""
result_data = {}
for name, col in self._data.items():
try:
result_data[name] = col.nans_to_nulls()
except AttributeError:
result_data[name] = col.copy()
return self._from_data_like_self(result_data)
@_cudf_nvtx_annotate
def __invert__(self):
"""Bitwise invert (~) for integral dtypes, logical NOT for bools."""
return self._from_data_like_self(
{
name: _apply_inverse_column(col)
for name, col in self._data.items()
}
)
@_cudf_nvtx_annotate
def nunique(self, dropna: bool = True):
"""
Returns a per column mapping with counts of unique values for
each column.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
dict
Name and unique value counts of each column in frame.
"""
return {
name: col.distinct_count(dropna=dropna)
for name, col in self._data.items()
}
@staticmethod
@_cudf_nvtx_annotate
def _repeat(
columns: List[ColumnBase], repeats, axis=None
) -> List[ColumnBase]:
if axis is not None:
raise NotImplementedError(
"Only axis=`None` supported at this time."
)
if not is_scalar(repeats):
repeats = as_column(repeats)
return libcudf.filling.repeat(columns, repeats)
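# Illustrative sketch (not part of the library): the element-wise repetition
# that the helper above ultimately powers through the public ``repeat`` API.
#
#   >>> import cudf
#   >>> cudf.Series([1, 2, 3]).repeat(2)
#   0    1
#   0    1
#   1    2
#   1    2
#   2    3
#   2    3
#   dtype: int64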
@_cudf_nvtx_annotate
@_warn_no_dask_cudf
def __dask_tokenize__(self):
return [
type(self),
self._dtypes,
self.to_pandas(),
]
def _apply_inverse_column(col: ColumnBase) -> ColumnBase:
"""Bitwise invert (~) for integral dtypes, logical NOT for bools."""
if np.issubdtype(col.dtype, np.integer):
return col.unary_operator("invert")
elif is_bool_dtype(col.dtype):
return col.unary_operator("not")
else:
raise TypeError(
f"Operation `~` not supported on {col.dtype.type.__name__}"
)
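# Illustrative sketch (not part of the library): how the ``~`` operator wired
# up above behaves for the two supported dtypes. Assumes cudf is importable.
#
#   >>> import cudf
#   >>> ~cudf.Series([0, 1, 2])         # integral dtype -> bitwise invert
#   0   -1
#   1   -2
#   2   -3
#   dtype: int64
#   >>> ~cudf.Series([True, False])     # bool dtype -> logical NOT
#   0    False
#   1     True
#   dtype: bool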
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/common.py
|
# Copyright (c) 2020, NVIDIA CORPORATION.
def pipe(obj, func, *args, **kwargs):
"""
Apply a function ``func`` to object ``obj`` either by passing ``obj`` as the
first argument to the function or, when ``func`` is a tuple, by interpreting
its first element as a function and passing ``obj`` to that function as a
keyword argument whose key is given by the second element of the tuple.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this object or, alternatively, a
``(callable, data_keyword)`` tuple where ``data_keyword`` is a
string indicating the keyword of ``callable`` that expects the
object.
*args : iterable, optional
Positional arguments passed into ``func``.
**kwargs : dict, optional
A dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
raise ValueError(
f"{target} is both the pipe target and a keyword argument"
)
kwargs[target] = obj
return func(*args, **kwargs)
else:
return func(obj, *args, **kwargs)
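# Minimal usage sketch (illustrative only): the two call styles accepted by
# ``pipe``. The DataFrame and helper function below are hypothetical.
#
#   >>> import cudf
#   >>> def add_col(df, value):
#   ...     df["c"] = value
#   ...     return df
#   >>> df = cudf.DataFrame({"a": [1, 2]})
#   >>> pipe(df, add_col, 1)                # obj passed as first positional
#   >>> pipe(df, (add_col, "df"), value=2)  # obj passed as the keyword "df"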
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/byte_pair_encoding.py
|
# Copyright (c) 2023, NVIDIA CORPORATION.
from __future__ import annotations
import cudf
from cudf._lib.nvtext.byte_pair_encode import (
BPEMergePairs as cpp_merge_pairs,
byte_pair_encoding as cpp_byte_pair_encoding,
)
class BytePairEncoder:
"""
Given a merge pairs strings series, performs byte pair encoding on
a strings series using the provided separator.
Parameters
----------
merges_pair : cudf.Series
Strings column of merge pairs
Returns
-------
BytePairEncoder
"""
def __init__(self, merges_pair: "cudf.Series"):
self.merge_pairs = cpp_merge_pairs(merges_pair._column)
def __call__(self, text, separator: str = " "):
"""
Parameters
----------
text : cudf string series
The strings to be encoded.
separator : str, default " "
The separator inserted between the byte-pair components of each encoded word.
Returns
-------
Encoded strings
Examples
--------
>>> import cudf
>>> from cudf.core.byte_pair_encoding import BytePairEncoder
>>> mps = cudf.Series(["e n", "i t", "i s", "e s", "en t",
... "c e", "es t", "en ce", "T h", "Th is",
... "t est", "s ent", "t h", "th is"])
>>> bpe = BytePairEncoder(mps)
>>> str_series = cudf.Series(['This is the sentence', 'thisisit'])
>>> bpe(str_series)
0 This is a sent ence
1 this is it
dtype: object
"""
sep = cudf.Scalar(separator, dtype="str")
result = cpp_byte_pair_encoding(text._column, self.merge_pairs, sep)
return cudf.Series(result)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/missing.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
# Pandas NAType enforces that only a single instance exists at a time;
# instantiating this class yields the existing instance of
# pandas._libs.missing.NAType, so id(cudf.NA) == id(pd.NA).
from pandas import NA, NaT
__all__ = ["NA", "NaT"]
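# Illustrative check (not part of the module): because the pandas singletons
# are re-exported directly, identity comparisons against pandas hold.
#
#   >>> import cudf
#   >>> import pandas as pd
#   >>> cudf.NA is pd.NA
#   True
#   >>> cudf.NaT is pd.NaT
#   True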
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/series.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
from __future__ import annotations
import functools
import inspect
import pickle
import textwrap
import warnings
from collections import abc
from shutil import get_terminal_size
from typing import (
Any,
Dict,
MutableMapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
import cupy
import numpy as np
import pandas as pd
from pandas._config import get_option
from pandas.core.dtypes.common import is_float
from typing_extensions import Self, assert_never
import cudf
from cudf import _lib as libcudf
from cudf._typing import (
ColumnLike,
DataFrameOrSeries,
NotImplementedType,
ScalarLike,
)
from cudf.api.extensions import no_default
from cudf.api.types import (
_is_non_decimal_numeric_dtype,
_is_scalar_or_zero_d_array,
is_bool_dtype,
is_decimal_dtype,
is_dict_like,
is_float_dtype,
is_integer,
is_integer_dtype,
is_list_dtype,
is_scalar,
is_string_dtype,
is_struct_dtype,
)
from cudf.core import indexing_utils
from cudf.core.abc import Serializable
from cudf.core.buffer import acquire_spill_lock
from cudf.core.column import (
ColumnBase,
DatetimeColumn,
IntervalColumn,
TimeDeltaColumn,
arange,
as_column,
column,
full,
)
from cudf.core.column.categorical import (
CategoricalAccessor as CategoricalAccessor,
)
from cudf.core.column.column import concat_columns
from cudf.core.column.lists import ListMethods
from cudf.core.column.string import StringMethods
from cudf.core.column.struct import StructMethods
from cudf.core.column_accessor import ColumnAccessor
from cudf.core.groupby.groupby import SeriesGroupBy, groupby_doc_template
from cudf.core.index import BaseIndex, DatetimeIndex, RangeIndex, as_index
from cudf.core.indexed_frame import (
IndexedFrame,
_FrameIndexer,
_get_label_range_or_mask,
_indices_from_labels,
doc_reset_index_template,
)
from cudf.core.resample import SeriesResampler
from cudf.core.single_column_frame import SingleColumnFrame
from cudf.core.udf.scalar_function import _get_scalar_kernel
from cudf.errors import MixedTypeError
from cudf.utils import docutils
from cudf.utils.docutils import copy_docstring
from cudf.utils.dtypes import (
can_convert_to_column,
find_common_type,
is_mixed_with_object_dtype,
to_cudf_compatible_scalar,
)
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
def _format_percentile_names(percentiles):
return [f"{int(x * 100)}%" for x in percentiles]
def _describe_numeric(obj, percentiles):
# Helper for Series.describe with numerical data.
data = {
"count": obj.count(),
"mean": obj.mean(),
"std": obj.std(),
"min": obj.min(),
**dict(
zip(
_format_percentile_names(percentiles),
obj.quantile(percentiles).to_numpy(na_value=np.nan).tolist(),
)
),
"max": obj.max(),
}
return {k: round(v, 6) for k, v in data.items()}
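# Illustrative sketch (not part of the module): the shape of the dict returned
# by ``_describe_numeric`` for a small Series, assuming the default
# percentiles ``[0.25, 0.5, 0.75]``. Values shown are approximate.
#
#   >>> import cudf
#   >>> _describe_numeric(cudf.Series([1, 2, 3, 4]), [0.25, 0.5, 0.75])
#   {'count': 4, 'mean': 2.5, 'std': 1.290994, 'min': 1,
#    '25%': 1.75, '50%': 2.5, '75%': 3.25, 'max': 4}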
def _describe_timetype(obj, percentiles, typ):
# Common helper for Series.describe with timedelta/timestamp data.
data = {
"count": str(obj.count()),
"mean": str(typ(obj.mean())),
"std": "",
"min": str(typ(obj.min())),
**dict(
zip(
_format_percentile_names(percentiles),
obj.quantile(percentiles)
.astype("str")
.to_numpy(na_value=np.nan)
.tolist(),
)
),
"max": str(typ(obj.max())),
}
if typ is pd.Timedelta:
data["std"] = str(obj.std())
else:
data.pop("std")
return data
def _describe_timedelta(obj, percentiles):
# Helper for Series.describe with timedelta data.
return _describe_timetype(obj, percentiles, pd.Timedelta)
def _describe_timestamp(obj, percentiles):
# Helper for Series.describe with timestamp data.
return _describe_timetype(obj, percentiles, pd.Timestamp)
def _describe_categorical(obj, percentiles):
# Helper for Series.describe with categorical data.
data = {
"count": obj.count(),
"unique": len(obj.unique()),
"top": None,
"freq": None,
}
if data["count"] > 0:
# In case there's a tie, break the tie by sorting the index
# and take the top.
val_counts = obj.value_counts(ascending=False)
tied_val_counts = val_counts[
val_counts == val_counts.iloc[0]
].sort_index()
data.update(
{
"top": tied_val_counts.index[0],
"freq": tied_val_counts.iloc[0],
}
)
return data
def _append_new_row_inplace(col: ColumnLike, value: ScalarLike):
"""Append a scalar `value` to the end of `col` inplace.
Cast to common type if possible
"""
to_type = find_common_type([type(value), col.dtype])
val_col = as_column(value, dtype=to_type)
old_col = col.astype(to_type)
col._mimic_inplace(concat_columns([old_col, val_col]), inplace=True)
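# Illustrative sketch (not part of the module): the promotion rule used above.
# Appending a float scalar to an int64 column upcasts both sides to float64
# before they are concatenated (assuming numpy-style promotion rules).
#
#   >>> find_common_type([type(1.5), np.dtype("int64")])
#   dtype('float64')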
class _SeriesIlocIndexer(_FrameIndexer):
"""
For integer-location based selection.
"""
_frame: cudf.Series
@_cudf_nvtx_annotate
def __getitem__(self, arg):
indexing_spec = indexing_utils.parse_row_iloc_indexer(
indexing_utils.destructure_series_iloc_indexer(arg, self._frame),
len(self._frame),
)
return self._frame._getitem_preprocessed(indexing_spec)
@_cudf_nvtx_annotate
def __setitem__(self, key, value):
from cudf.core.column import column
if isinstance(key, tuple):
key = list(key)
# coerce value into a scalar or column
if is_scalar(value):
value = to_cudf_compatible_scalar(value)
if (
not isinstance(
self._frame._column,
(
cudf.core.column.DatetimeColumn,
cudf.core.column.TimeDeltaColumn,
),
)
and cudf.utils.utils._isnat(value)
and not (
isinstance(
self._frame._column, cudf.core.column.StringColumn
)
and isinstance(value, str)
)
):
raise MixedTypeError(
f"Cannot assign {value=} to non-datetime/non-timedelta "
"columns"
)
elif (
not (
is_float_dtype(self._frame._column.dtype)
or (
isinstance(
self._frame._column.dtype, cudf.CategoricalDtype
)
and is_float_dtype(
self._frame._column.dtype.categories.dtype
)
)
)
and isinstance(value, (np.float32, np.float64))
and np.isnan(value)
):
raise MixedTypeError(
f"Cannot assign {value=} to "
f"non-float dtype={self._frame._column.dtype}"
)
elif (
is_bool_dtype(self._frame._column.dtype)
and not is_bool_dtype(value)
and value not in {None, cudf.NA}
):
raise MixedTypeError(
f"Cannot assign {value=} to "
f"bool dtype={self._frame._column.dtype}"
)
elif not (
isinstance(value, (list, dict))
and isinstance(
self._frame._column.dtype, (cudf.ListDtype, cudf.StructDtype)
)
):
value = column.as_column(value)
if (
(
_is_non_decimal_numeric_dtype(self._frame._column.dtype)
or is_string_dtype(self._frame._column.dtype)
)
and hasattr(value, "dtype")
and _is_non_decimal_numeric_dtype(value.dtype)
):
# normalize types if necessary:
# In contrast to Column.__setitem__ (which downcasts the value to
# the dtype of the column) here we upcast the series to the
# larger data type mimicking pandas
to_dtype = np.result_type(value.dtype, self._frame._column.dtype)
value = value.astype(to_dtype)
if to_dtype != self._frame._column.dtype:
self._frame._column._mimic_inplace(
self._frame._column.astype(to_dtype), inplace=True
)
self._frame._column[key] = value
class _SeriesLocIndexer(_FrameIndexer):
"""
Label-based selection
"""
@_cudf_nvtx_annotate
def __getitem__(self, arg: Any) -> Union[ScalarLike, DataFrameOrSeries]:
if isinstance(arg, pd.MultiIndex):
arg = cudf.from_pandas(arg)
if isinstance(self._frame.index, cudf.MultiIndex) and not isinstance(
arg, cudf.MultiIndex
):
if is_scalar(arg):
row_arg = (arg,)
else:
row_arg = arg
result = self._frame.index._get_row_major(self._frame, row_arg)
if (
isinstance(arg, tuple)
and len(arg) == self._frame._index.nlevels
and not any(isinstance(x, slice) for x in arg)
):
result = result.iloc[0]
return result
try:
arg = self._loc_to_iloc(arg)
except (TypeError, KeyError, IndexError, ValueError):
raise KeyError(arg)
return self._frame.iloc[arg]
@_cudf_nvtx_annotate
def __setitem__(self, key, value):
try:
key = self._loc_to_iloc(key)
except KeyError as e:
if (
is_scalar(key)
and not isinstance(self._frame.index, cudf.MultiIndex)
and is_scalar(value)
):
# TODO: Modifying the index in place is bad because
# our indexes are immutable, but columns are not (which
# means our indexes are mutable via internal APIs).
# Get rid of the deep copy once columns too are
# immutable.
idx_copy = self._frame._index.copy(deep=True)
if (
isinstance(idx_copy, cudf.RangeIndex)
and isinstance(key, int)
and (key == idx_copy[-1] + idx_copy.step)
):
idx_copy = cudf.RangeIndex(
start=idx_copy.start,
stop=idx_copy.stop + idx_copy.step,
step=idx_copy.step,
name=idx_copy.name,
)
else:
if isinstance(idx_copy, cudf.RangeIndex):
idx_copy = idx_copy._as_int_index()
_append_new_row_inplace(idx_copy._values, key)
self._frame._index = idx_copy
_append_new_row_inplace(self._frame._column, value)
return
else:
raise e
if isinstance(value, (pd.Series, cudf.Series)):
value = cudf.Series(value)
value = value._align_to_index(self._frame.index, how="right")
self._frame.iloc[key] = value
def _loc_to_iloc(self, arg):
if isinstance(arg, tuple) and arg and isinstance(arg[0], slice):
if len(arg) > 1:
raise IndexError("Too many Indexers")
arg = arg[0]
if _is_scalar_or_zero_d_array(arg):
index_dtype = self._frame.index.dtype
if not _is_non_decimal_numeric_dtype(index_dtype) and not (
isinstance(index_dtype, cudf.CategoricalDtype)
and is_integer_dtype(index_dtype.categories.dtype)
):
# TODO: switch to cudf.utils.dtypes.is_integer(arg)
if isinstance(arg, cudf.Scalar) and is_integer_dtype(
arg.dtype
):
found_index = arg.value
return found_index
elif is_integer(arg):
found_index = arg
return found_index
try:
indices = self._frame.index._indices_of(arg)
if (n := len(indices)) == 0:
raise KeyError("Label scalar is out of bounds")
elif n == 1:
return indices.element_indexing(0)
else:
return indices
except (TypeError, KeyError, IndexError, ValueError):
raise KeyError("Label scalar is out of bounds")
elif isinstance(arg, slice):
return _get_label_range_or_mask(
self._frame.index, arg.start, arg.stop, arg.step
)
elif isinstance(arg, (cudf.MultiIndex, pd.MultiIndex)):
if isinstance(arg, pd.MultiIndex):
arg = cudf.MultiIndex.from_pandas(arg)
return _indices_from_labels(self._frame, arg)
else:
arg = cudf.core.series.Series(cudf.core.column.as_column(arg))
if arg.dtype in (bool, np.bool_):
return arg
else:
indices = _indices_from_labels(self._frame, arg)
if indices.null_count > 0:
raise KeyError("label scalar is out of bound")
return indices
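# Illustrative sketch (not part of the classes above): the kinds of positional
# answers ``_loc_to_iloc`` resolves label-based keys into, shown through the
# public ``.loc`` API on a string-indexed Series.
#
#   >>> import cudf
#   >>> s = cudf.Series([10, 20, 30], index=["a", "b", "c"])
#   >>> s.loc["b"]           # scalar label -> single integer position
#   20
#   >>> s.loc["a":"b"]       # label slice -> positional range (inclusive)
#   a    10
#   b    20
#   dtype: int64
#   >>> s.loc[["c", "a"]]    # list of labels -> gather by resolved positions
#   c    30
#   a    10
#   dtype: int64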
class Series(SingleColumnFrame, IndexedFrame, Serializable):
"""
One-dimensional GPU array (including time series).
Labels need not be unique but must be a hashable type. The object
supports both integer- and label-based indexing and provides a
host of methods for performing operations involving the index.
Statistical methods from ndarray have been overridden to
automatically exclude missing data (currently represented
as null/NaN).
Operations between Series (`+`, `-`, `/`, `*`, `**`) align
values based on their associated index values; they need
not be the same length. The result index will be the
sorted union of the two indexes.
``Series`` objects are used as columns of ``DataFrame``.
Parameters
----------
data : array-like, Iterable, dict, or scalar value
Contains data stored in Series.
index : array-like or Index (1d)
Values must be hashable and have the same length
as data. Non-unique index values are allowed. Will
default to RangeIndex (0, 1, 2, ..., n) if not provided.
If both a dict and index sequence are used, the index will
override the keys found in the dict.
dtype : str, :class:`numpy.dtype`, or ExtensionDtype, optional
Data type for the output Series. If not specified,
this will be inferred from data.
name : str, optional
The name to give to the Series.
copy : bool, default False
Copy input data. Only affects Series or 1d ndarray input.
nan_as_null : bool, Default True
If ``None``/``True``, converts ``np.nan`` values to
``null`` values.
If ``False``, leaves ``np.nan`` values as is.
"""
_accessors: Set[Any] = set()
_loc_indexer_type = _SeriesLocIndexer
_iloc_indexer_type = _SeriesIlocIndexer
_groupby = SeriesGroupBy
_resampler = SeriesResampler
# The `constructor*` properties are used by `dask` (and `dask_cudf`)
@property
def _constructor(self):
return Series
@property
def _constructor_sliced(self):
raise NotImplementedError(
"_constructor_sliced not supported for Series!"
)
@property
def _constructor_expanddim(self):
return cudf.DataFrame
@classmethod
@_cudf_nvtx_annotate
def from_categorical(cls, categorical, codes=None):
"""Creates from a pandas.Categorical
Parameters
----------
categorical : pandas.Categorical
Contains data stored in a pandas Categorical.
codes : array-like, optional.
The category codes of this categorical. If ``codes`` are
defined, they are used instead of ``categorical.codes``
Returns
-------
Series
A cudf categorical series.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> pd_categorical = pd.Categorical(pd.Series(['a', 'b', 'c', 'a'], dtype='category'))
>>> pd_categorical
['a', 'b', 'c', 'a']
Categories (3, object): ['a', 'b', 'c']
>>> series = cudf.Series.from_categorical(pd_categorical)
>>> series
0 a
1 b
2 c
3 a
dtype: category
Categories (3, object): ['a', 'b', 'c']
""" # noqa: E501
col = cudf.core.column.categorical.pandas_categorical_as_column(
categorical, codes=codes
)
return Series(data=col)
@classmethod
@_cudf_nvtx_annotate
def from_masked_array(cls, data, mask, null_count=None):
"""Create a Series with null-mask.
This is equivalent to:
Series(data).set_mask(mask, null_count=null_count)
Parameters
----------
data : 1D array-like
The values. Null values must not be skipped. They can appear
as garbage values.
mask : 1D array-like
The null-mask. Valid values are marked as ``1``; otherwise ``0``.
The mask bit given the data index ``idx`` is computed as::
(mask[idx // 8] >> (idx % 8)) & 1
null_count : int, optional
The number of null values.
If None, it is calculated automatically.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> a = cudf.Series([1, 2, 3, None, 4, None])
>>> a
0 1
1 2
2 3
3 <NA>
4 4
5 <NA>
dtype: int64
>>> b = cudf.Series([10, 11, 12, 13, 14])
>>> cudf.Series.from_masked_array(data=b, mask=a._column.mask)
0 10
1 11
2 12
3 <NA>
4 14
dtype: int64
"""
col = column.as_column(data).set_mask(mask)
return cls(data=col)
@_cudf_nvtx_annotate
def __init__(
self,
data=None,
index=None,
dtype=None,
name=None,
copy=False,
nan_as_null=True,
):
if (
isinstance(data, Sequence)
and len(data) == 0
and dtype is None
and getattr(data, "dtype", None) is None
):
warnings.warn(
"The default dtype for empty Series will be 'object' instead "
"of 'float64' in a future version. Specify a dtype explicitly "
"to silence this warning.",
FutureWarning,
)
if isinstance(data, pd.Series):
if name is None:
name = data.name
if isinstance(data.index, pd.MultiIndex):
index = cudf.from_pandas(data.index)
else:
index = as_index(data.index)
elif isinstance(data, pd.Index):
if name is None:
name = data.name
data = as_column(data, nan_as_null=nan_as_null, dtype=dtype)
elif isinstance(data, BaseIndex):
if name is None:
name = data.name
data = data._values
if dtype is not None:
data = data.astype(dtype)
elif isinstance(data, ColumnAccessor):
raise TypeError(
"Use cudf.Series._from_data for constructing a Series from "
"ColumnAccessor"
)
if isinstance(data, Series):
if index is not None:
data = data.reindex(index)
else:
index = data._index
if name is None:
name = data.name
data = data._column
if copy:
data = data.copy(deep=True)
if dtype is not None:
data = data.astype(dtype)
if isinstance(data, dict):
if not data:
current_index = RangeIndex(0)
else:
current_index = data.keys()
if index is not None:
series = Series(
list(data.values()),
nan_as_null=nan_as_null,
dtype=dtype,
index=current_index,
)
new_index = as_index(index)
if not series.index.equals(new_index):
series = series.reindex(new_index)
data = series._column
index = series._index
else:
data = column.as_column(
list(data.values()), nan_as_null=nan_as_null, dtype=dtype
)
index = current_index
if data is None:
if index is not None:
data = column.column_empty(
row_count=len(index), dtype=None, masked=True
)
else:
data = {}
if not isinstance(data, ColumnBase):
# Using `getattr_static` to check if
# `data` is on device memory and perform
# a deep copy later. This is different
# from `hasattr` because, it doesn't
# invoke the property we are looking
# for and the latter actually invokes
# the property, which in this case could
# be expensive or mark a buffer as
# unspillable.
has_cai = (
type(
inspect.getattr_static(
data, "__cuda_array_interface__", None
)
)
is property
)
data = column.as_column(
data,
nan_as_null=nan_as_null,
dtype=dtype,
length=len(index) if index is not None else None,
)
if copy and has_cai:
data = data.copy(deep=True)
else:
if dtype is not None:
data = data.astype(dtype)
if index is not None and not isinstance(index, BaseIndex):
index = as_index(index)
assert isinstance(data, ColumnBase)
super().__init__({name: data})
self._index = RangeIndex(len(data)) if index is None else index
self._check_data_index_length_match()
@classmethod
@_cudf_nvtx_annotate
def _from_data(
cls,
data: MutableMapping,
index: Optional[BaseIndex] = None,
name: Any = no_default,
) -> Series:
out = super()._from_data(data=data, index=index)
if name is not no_default:
out.name = name
return out
@_cudf_nvtx_annotate
def __contains__(self, item):
return item in self._index
@classmethod
@_cudf_nvtx_annotate
def from_pandas(cls, s, nan_as_null=no_default):
"""
Convert from a Pandas Series.
Parameters
----------
s : Pandas Series object
A Pandas Series object which has to be converted
to cuDF Series.
nan_as_null : bool, Default None
If ``None``/``True``, converts ``np.nan`` values to
``null`` values.
If ``False``, leaves ``np.nan`` values as is.
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> import numpy as np
>>> data = [10, 20, 30, np.nan]
>>> pds = pd.Series(data, dtype='float64')
>>> cudf.Series.from_pandas(pds)
0 10.0
1 20.0
2 30.0
3 <NA>
dtype: float64
>>> cudf.Series.from_pandas(pds, nan_as_null=False)
0 10.0
1 20.0
2 30.0
3 NaN
dtype: float64
"""
if nan_as_null is no_default:
nan_as_null = (
False if cudf.get_option("mode.pandas_compatible") else None
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = cls(s, nan_as_null=nan_as_null)
return result
@property # type: ignore
@_cudf_nvtx_annotate
def is_unique(self):
"""Return boolean if values in the object are unique.
Returns
-------
bool
"""
return self._column.is_unique
@property # type: ignore
@_cudf_nvtx_annotate
def dt(self):
"""
Accessor object for datetime-like properties of the Series values.
Examples
--------
>>> s = cudf.Series(cudf.date_range(
... start='2001-02-03 12:00:00',
... end='2001-02-03 14:00:00',
... freq='1H'))
>>> s.dt.hour
0 12
1 13
dtype: int16
>>> s.dt.second
0 0
1 0
dtype: int16
>>> s.dt.day
0 3
1 3
dtype: int16
Returns
-------
A Series indexed like the original Series.
Raises
------
TypeError if the Series does not contain datetimelike values.
"""
if isinstance(self._column, DatetimeColumn):
return DatetimeProperties(self)
elif isinstance(self._column, TimeDeltaColumn):
return TimedeltaProperties(self)
else:
raise AttributeError(
"Can only use .dt accessor with datetimelike values"
)
@property # type:ignore
@_cudf_nvtx_annotate
def axes(self):
"""
Return a list representing the axes of the Series.
Series.axes returns a list containing the row index.
Examples
--------
>>> import cudf
>>> csf1 = cudf.Series([1, 2, 3, 4])
>>> csf1.axes
[RangeIndex(start=0, stop=4, step=1)]
"""
return [self.index]
@property # type: ignore
@_cudf_nvtx_annotate
def hasnans(self):
"""
Return True if there are any NaNs or nulls.
Returns
-------
out : bool
If Series has at least one NaN or null value, return True;
otherwise return False.
Examples
--------
>>> import cudf
>>> import numpy as np
>>> series = cudf.Series([1, 2, np.nan, 3, 4], nan_as_null=False)
>>> series
0 1.0
1 2.0
2 NaN
3 3.0
4 4.0
dtype: float64
>>> series.hasnans
True
`hasnans` returns `True` for the presence of any `NA` values:
>>> series = cudf.Series([1, 2, 3, None, 4])
>>> series
0 1
1 2
2 3
3 <NA>
4 4
dtype: int64
>>> series.hasnans
True
"""
return self._column.has_nulls(include_nan=True)
@_cudf_nvtx_annotate
def serialize(self):
header, frames = super().serialize()
header["index"], index_frames = self._index.serialize()
header["index_frame_count"] = len(index_frames)
# For backwards compatibility with older versions of cuDF, index
# columns are placed before data columns.
frames = index_frames + frames
return header, frames
@classmethod
@_cudf_nvtx_annotate
def deserialize(cls, header, frames):
index_nframes = header["index_frame_count"]
obj = super().deserialize(
header, frames[header["index_frame_count"] :]
)
idx_typ = pickle.loads(header["index"]["type-serialized"])
index = idx_typ.deserialize(header["index"], frames[:index_nframes])
obj._index = index
return obj
def _get_columns_by_label(self, labels, *, downcast=False) -> Self:
"""Return the column specified by `labels`
For cudf.Series, either the column, or an empty series is returned.
Parameter `downcast` does not have effects.
"""
ca = self._data.select_by_label(labels)
return (
self.__class__._from_data(data=ca, index=self.index)
if len(ca) > 0
else self.__class__(dtype=self.dtype, name=self.name)
)
@_cudf_nvtx_annotate
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
if axis == 1:
raise ValueError("No axis named 1 for object type Series")
# Ignore columns for Series
if columns is not None:
columns = []
return super().drop(
labels, axis, index, columns, level, inplace, errors
)
def tolist(self): # noqa: D102
raise TypeError(
"cuDF does not support conversion to host memory "
"via the `tolist()` method. Consider using "
"`.to_arrow().to_pylist()` to construct a Python list."
)
to_list = tolist
@_cudf_nvtx_annotate
def to_dict(self, into: type[dict] = dict) -> dict:
"""
Convert Series to {label -> value} dict or dict-like object.
Parameters
----------
into : class, default dict
The collections.abc.Mapping subclass to use as the return
object. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
collections.abc.Mapping
Key-value representation of Series.
Examples
--------
>>> import cudf
>>> s = cudf.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.to_dict()
{0: 1, 1: 2, 2: 3, 3: 4}
>>> from collections import OrderedDict, defaultdict
>>> s.to_dict(OrderedDict)
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> dd = defaultdict(list)
>>> s.to_dict(dd)
defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4})
"""
return self.to_pandas().to_dict(into=into)
@_cudf_nvtx_annotate
def append(self, to_append, ignore_index=False, verify_integrity=False):
"""Append values from another ``Series`` or array-like object.
If ``ignore_index=True``, the index is reset.
Parameters
----------
to_append : Series or list/tuple of Series
Series to append with self.
ignore_index : boolean, default False.
If True, do not use the index.
verify_integrity : bool, default False
This parameter is currently not supported.
Returns
-------
Series
A new concatenated series
See Also
--------
cudf.concat : General function to concatenate DataFrame or
Series objects.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series([1, 2, 3])
>>> s2 = cudf.Series([4, 5, 6])
>>> s1
0 1
1 2
2 3
dtype: int64
>>> s2
0 4
1 5
2 6
dtype: int64
>>> s1.append(s2)
0 1
1 2
2 3
0 4
1 5
2 6
dtype: int64
>>> s3 = cudf.Series([4, 5, 6], index=[3, 4, 5])
>>> s3
3 4
4 5
5 6
dtype: int64
>>> s1.append(s3)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
With `ignore_index` set to True:
>>> s1.append(s2, ignore_index=True)
0 1
1 2
2 3
3 4
4 5
5 6
dtype: int64
"""
return super()._append(to_append, ignore_index, verify_integrity)
@_cudf_nvtx_annotate
def reindex(self, *args, **kwargs):
"""
Conform Series to new index.
Parameters
----------
index : Index, Series-convertible, default None
New labels / index to conform to,
should be specified using keywords.
method: Not Supported
copy : boolean, default True
level: Not Supported
fill_value : Value to use for missing values.
Defaults to ``NA``, but can be any "compatible" value.
limit: Not Supported
tolerance: Not Supported
Returns
-------
Series with changed index.
Examples
--------
>>> import cudf
>>> series = cudf.Series([10, 20, 30, 40], index=['a', 'b', 'c', 'd'])
>>> series
a 10
b 20
c 30
d 40
dtype: int64
>>> series.reindex(['a', 'b', 'y', 'z'])
a 10
b 20
y <NA>
z <NA>
dtype: int64
.. pandas-compat::
**Series.reindex**
Note: One difference from Pandas is that ``NA`` is used for rows
that do not match, rather than ``NaN``. One side effect of this is
that the series retains an integer dtype in cuDF
where it is cast to float in Pandas.
"""
if len(args) > 1:
raise TypeError(
"Only one positional argument ('index') is allowed"
)
if args:
(index,) = args
if "index" in kwargs:
raise TypeError(
"'index' passed as both positional and keyword argument"
)
else:
index = kwargs.get("index", self._index)
name = self.name or 0
series = self._reindex(
deep=kwargs.get("copy", True),
dtypes={name: self.dtype},
index=index,
column_names=[name],
inplace=False,
fill_value=kwargs.get("fill_value", cudf.NA),
)
series.name = self.name
return series
@_cudf_nvtx_annotate
@docutils.doc_apply(
doc_reset_index_template.format(
klass="Series",
argument="""
name : object, optional
The name to use for the column containing the original Series
values. Uses self.name by default. This argument is ignored when
``drop`` is True.""",
return_type="Series or DataFrame or None",
return_doc=""" For Series, when drop is False (the default), a DataFrame
is returned. The newly created columns will come first in the
DataFrame, followed by the original Series values. When `drop` is
True, a `Series` is returned. In either case, if ``inplace=True``,
no value is returned.
""", # noqa: E501
example="""
>>> series = cudf.Series(['a', 'b', 'c', 'd'], index=[10, 11, 12, 13])
>>> series
10 a
11 b
12 c
13 d
dtype: object
>>> series.reset_index()
index 0
0 10 a
1 11 b
2 12 c
3 13 d
>>> series.reset_index(drop=True)
0 a
1 b
2 c
3 d
dtype: object
You can also use ``reset_index`` with MultiIndex.
>>> s2 = cudf.Series(
... range(4), name='foo',
... index=cudf.MultiIndex.from_tuples([
... ('bar', 'one'), ('bar', 'two'),
... ('baz', 'one'), ('baz', 'two')],
... names=['a', 'b']
... ))
>>> s2
a b
bar one 0
two 1
baz one 2
two 3
Name: foo, dtype: int64
>>> s2.reset_index(level='a')
a foo
b
one bar 0
two bar 1
one baz 2
two baz 3
""",
)
)
def reset_index(self, level=None, drop=False, name=None, inplace=False):
if not drop and inplace:
raise TypeError(
"Cannot reset_index inplace on a Series "
"to create a DataFrame"
)
data, index = self._reset_index(level=level, drop=drop)
if not drop:
if name is None:
name = 0 if self.name is None else self.name
data[name] = data.pop(self.name)
return cudf.core.dataframe.DataFrame._from_data(data, index)
# For ``name`` behavior, see:
# https://github.com/pandas-dev/pandas/issues/44575
# ``name`` has to be ignored when `drop=True`
return self._mimic_inplace(
Series._from_data(data, index, self.name),
inplace=inplace,
)
@_cudf_nvtx_annotate
def to_frame(self, name=None):
"""Convert Series into a DataFrame
Parameters
----------
name : str, default None
Name to be used for the column
Returns
-------
DataFrame
cudf DataFrame
Examples
--------
>>> import cudf
>>> series = cudf.Series(['a', 'b', 'c', None, 'd'], name='sample', index=[10, 11, 12, 13, 15])
>>> series
10 a
11 b
12 c
13 <NA>
15 d
Name: sample, dtype: object
>>> series.to_frame()
sample
10 a
11 b
12 c
13 <NA>
15 d
""" # noqa: E501
if name is not None:
col = name
elif self.name is None:
col = 0
else:
col = self.name
return cudf.DataFrame({col: self._column}, index=self.index)
@_cudf_nvtx_annotate
def memory_usage(self, index=True, deep=False):
return self._column.memory_usage + (
self._index.memory_usage() if index else 0
)
@_cudf_nvtx_annotate
def __array_function__(self, func, types, args, kwargs):
if "out" in kwargs or not all(issubclass(t, Series) for t in types):
return NotImplemented
try:
# Apply a Series method if one exists.
if cudf_func := getattr(Series, func.__name__, None):
result = cudf_func(*args, **kwargs)
if func.__name__ == "unique":
# NumPy expects a sorted result for `unique`, which is not
# guaranteed by cudf.Series.unique.
result = result.sort_values()
return result
# Assume that cupy subpackages match numpy and search the
# corresponding cupy submodule based on the func's __module__.
numpy_submodule = func.__module__.split(".")[1:]
cupy_func = cupy
for name in (*numpy_submodule, func.__name__):
cupy_func = getattr(cupy_func, name, None)
# Handle case if cupy does not implement the function or just
# aliases the numpy function.
if not cupy_func or cupy_func is func:
return NotImplemented
# For now just fail on cases with mismatched indices. There is
# almost certainly no general solution for all array functions.
index = args[0].index
if not all(s.index.equals(index) for s in args):
return NotImplemented
out = cupy_func(*(s.values for s in args), **kwargs)
# Return (host) scalar values immediately.
if not isinstance(out, cupy.ndarray):
return out
# 0D array (scalar)
if out.ndim == 0:
return to_cudf_compatible_scalar(out)
# 1D array
elif (
# Only allow 1D arrays
((out.ndim == 1) or (out.ndim == 2 and out.shape[1] == 1))
# If we have an index, it must be the same length as the
# output for cupy dispatching to be well-defined.
and len(index) == len(out)
):
return Series(out, index=index)
except Exception:
# The rare instance where a "silent" failure is preferable. Except
# in the (highly unlikely) case that some other library
# interoperates with cudf objects, the result will be that numpy
# raises a TypeError indicating that the operation is not
# implemented, which is much friendlier than an arbitrary internal
# cudf error.
pass
return NotImplemented
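# Illustrative sketch (not part of the class): thanks to the protocol above,
# many NumPy functions dispatch to cudf / cupy without copying data to host.
#
#   >>> import numpy as np
#   >>> import cudf
#   >>> s = cudf.Series([1.0, 2.0, 3.0])
#   >>> np.mean(s)              # resolved via cudf.Series.mean
#   2.0
#   >>> np.unique(s)            # falls back to Series.unique, then sorted
#   0    1.0
#   1    2.0
#   2    3.0
#   dtype: float64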
@_cudf_nvtx_annotate
def map(self, arg, na_action=None) -> "Series":
"""
Map values of Series according to input correspondence.
Used for substituting each value in a Series with another value
that may be derived from a function, a ``dict`` or
a :class:`Series`.
Parameters
----------
arg : function, collections.abc.Mapping subclass or Series
Mapping correspondence.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to the
mapping correspondence.
Returns
-------
Series
Same index as caller.
Examples
--------
>>> s = cudf.Series(['cat', 'dog', np.nan, 'rabbit'])
>>> s
0 cat
1 dog
2 <NA>
3 rabbit
dtype: object
``map`` accepts a ``dict`` or a ``Series``. Values that are not found
in the ``dict`` are converted to ``NaN``; default values in dicts are
currently not supported:
>>> s.map({'cat': 'kitten', 'dog': 'puppy'})
0 kitten
1 puppy
2 <NA>
3 <NA>
dtype: object
It also accepts numeric functions:
>>> s = cudf.Series([1, 2, 3, 4, np.nan])
>>> s.map(lambda x: x ** 2)
0 1
1 4
2 9
3 16
4 <NA>
dtype: int64
Notes
-----
Please note map currently only supports fixed-width numeric
type functions.
"""
if isinstance(arg, dict):
if hasattr(arg, "__missing__"):
raise NotImplementedError(
"default values in dicts are currently not supported."
)
lhs = cudf.DataFrame({"x": self, "orig_order": arange(len(self))})
rhs = cudf.DataFrame(
{
"x": arg.keys(),
"s": arg.values(),
"bool": full(len(arg), True, dtype=self.dtype),
}
)
res = lhs.merge(rhs, on="x", how="left").sort_values(
by="orig_order"
)
result = res["s"]
result.name = self.name
result.index = self.index
elif isinstance(arg, cudf.Series):
if not arg.index.is_unique:
raise ValueError(
"Reindexing only valid with"
" uniquely valued Index objects"
)
lhs = cudf.DataFrame({"x": self, "orig_order": arange(len(self))})
rhs = cudf.DataFrame(
{
"x": arg.keys(),
"s": arg,
"bool": full(len(arg), True, dtype=self.dtype),
}
)
res = lhs.merge(rhs, on="x", how="left").sort_values(
by="orig_order"
)
result = res["s"]
result.name = self.name
result.index = self.index
else:
result = self.apply(arg)
return result
def _getitem_preprocessed(
self,
spec: indexing_utils.IndexingSpec,
) -> Union[Self, ScalarLike]:
"""Get subset of entries given structured data
Parameters
----------
spec
Indexing specification
Returns
-------
Subsetted Series or else scalar (if a scalar entry is
requested)
Notes
-----
This function performs no bounds-checking or massaging of the
inputs.
"""
if isinstance(spec, indexing_utils.MapIndexer):
return self._gather(spec.key, keep_index=True)
elif isinstance(spec, indexing_utils.MaskIndexer):
return self._apply_boolean_mask(spec.key, keep_index=True)
elif isinstance(spec, indexing_utils.SliceIndexer):
return self._slice(spec.key)
elif isinstance(spec, indexing_utils.ScalarIndexer):
return self._gather(
spec.key, keep_index=False
)._column.element_indexing(0)
elif isinstance(spec, indexing_utils.EmptyIndexer):
return self._empty_like(keep_index=True)
assert_never(spec)
@_cudf_nvtx_annotate
def __getitem__(self, arg):
if isinstance(arg, slice):
return self.iloc[arg]
else:
return self.loc[arg]
iteritems = SingleColumnFrame.__iter__
items = SingleColumnFrame.__iter__
@_cudf_nvtx_annotate
def __setitem__(self, key, value):
if isinstance(key, slice):
self.iloc[key] = value
else:
self.loc[key] = value
def __repr__(self):
_, height = get_terminal_size()
max_rows = (
height
if get_option("display.max_rows") == 0
else get_option("display.max_rows")
)
if max_rows not in (0, None) and len(self) > max_rows:
top = self.head(int(max_rows / 2 + 1))
bottom = self.tail(int(max_rows / 2 + 1))
preprocess = cudf.concat([top, bottom])
else:
preprocess = self.copy()
preprocess.index = preprocess.index._clean_nulls_from_index()
if (
preprocess.nullable
and not isinstance(
preprocess._column, cudf.core.column.CategoricalColumn
)
and not is_list_dtype(preprocess.dtype)
and not is_struct_dtype(preprocess.dtype)
and not is_decimal_dtype(preprocess.dtype)
and not is_struct_dtype(preprocess.dtype)
) or isinstance(
preprocess._column,
cudf.core.column.timedelta.TimeDeltaColumn,
):
fill_value = (
str(cudf.NaT)
if isinstance(
preprocess._column,
(
cudf.core.column.TimeDeltaColumn,
cudf.core.column.DatetimeColumn,
),
)
else str(cudf.NA)
)
output = repr(
preprocess.astype("str").fillna(fill_value).to_pandas()
)
elif isinstance(
preprocess._column, cudf.core.column.CategoricalColumn
):
min_rows = (
height
if get_option("display.min_rows") == 0
else get_option("display.min_rows")
)
show_dimensions = get_option("display.show_dimensions")
if preprocess._column.categories.dtype.kind == "f":
pd_series = (
preprocess.astype("str")
.to_pandas()
.astype(
dtype=pd.CategoricalDtype(
categories=preprocess.dtype.categories.astype(
"str"
).to_pandas(),
ordered=preprocess.dtype.ordered,
)
)
)
else:
pd_series = preprocess.to_pandas()
output = pd_series.to_string(
name=self.name,
dtype=self.dtype,
min_rows=min_rows,
max_rows=max_rows,
length=show_dimensions,
na_rep=str(cudf.NA),
)
else:
output = repr(preprocess.to_pandas())
lines = output.split("\n")
if isinstance(preprocess._column, cudf.core.column.CategoricalColumn):
category_memory = lines[-1]
if preprocess._column.categories.dtype.kind == "f":
category_memory = category_memory.replace("'", "").split(": ")
category_memory = (
category_memory[0].replace(
"object", preprocess._column.categories.dtype.name
)
+ ": "
+ category_memory[1]
)
lines = lines[:-1]
if len(lines) > 1:
if lines[-1].startswith("Name: "):
lines = lines[:-1]
lines.append("Name: %s" % str(self.name))
if len(self) > len(preprocess):
lines[-1] = lines[-1] + ", Length: %d" % len(self)
lines[-1] = lines[-1] + ", "
elif lines[-1].startswith("Length: "):
lines = lines[:-1]
lines.append("Length: %d" % len(self))
lines[-1] = lines[-1] + ", "
else:
lines = lines[:-1]
lines[-1] = lines[-1] + "\n"
lines[-1] = lines[-1] + "dtype: %s" % self.dtype
else:
lines = output.split(",")
lines[-1] = " dtype: %s)" % self.dtype
return ",".join(lines)
if isinstance(preprocess._column, cudf.core.column.CategoricalColumn):
lines.append(category_memory)
return "\n".join(lines)
def _make_operands_and_index_for_binop(
self,
other: Any,
fn: str,
fill_value: Any = None,
reflect: bool = False,
can_reindex: bool = False,
*args,
**kwargs,
) -> Tuple[
Union[
Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]],
NotImplementedType,
],
Optional[BaseIndex],
bool,
]:
# Specialize binops to align indices.
if isinstance(other, Series):
if (
not can_reindex
and fn in cudf.utils.utils._EQUALITY_OPS
and not self.index.equals(other.index)
):
raise ValueError(
"Can only compare identically-labeled Series objects"
)
lhs, other = _align_indices([self, other], allow_non_unique=True)
else:
lhs = self
try:
can_use_self_column_name = cudf.utils.utils._is_same_name(
self.name, other.name
)
except AttributeError:
can_use_self_column_name = False
operands = lhs._make_operands_for_binop(other, fill_value, reflect)
return operands, lhs._index, can_use_self_column_name
@copy_docstring(CategoricalAccessor) # type: ignore
@property
@_cudf_nvtx_annotate
def cat(self):
return CategoricalAccessor(parent=self)
@copy_docstring(StringMethods) # type: ignore
@property
@_cudf_nvtx_annotate
def str(self):
return StringMethods(parent=self)
@copy_docstring(ListMethods) # type: ignore
@property
@_cudf_nvtx_annotate
def list(self):
return ListMethods(parent=self)
@copy_docstring(StructMethods) # type: ignore
@property
@_cudf_nvtx_annotate
def struct(self):
return StructMethods(parent=self)
@property # type: ignore
@_cudf_nvtx_annotate
def dtype(self):
"""The dtype of the Series."""
return self._column.dtype
@classmethod
@_cudf_nvtx_annotate
def _concat(cls, objs, axis=0, index=True):
# Concatenate index if not provided
if index is True:
if isinstance(objs[0].index, cudf.MultiIndex):
index = cudf.MultiIndex._concat([o.index for o in objs])
else:
index = cudf.core.index.GenericIndex._concat(
[o.index for o in objs]
)
names = {obj.name for obj in objs}
if len(names) == 1:
[name] = names
else:
name = None
if len(objs) > 1:
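# Detect dtype mismatches between the inputs, skipping empty, all-null
# and categorical columns; mismatched inputs are cast to a common dtype
# before their columns are concatenated.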
dtype_mismatch = False
for obj in objs[1:]:
if (
obj.null_count == len(obj)
or len(obj) == 0
or isinstance(
obj._column, cudf.core.column.CategoricalColumn
)
or isinstance(
objs[0]._column, cudf.core.column.CategoricalColumn
)
):
continue
if (
not dtype_mismatch
and (
not isinstance(
objs[0]._column, cudf.core.column.CategoricalColumn
)
and not isinstance(
obj._column, cudf.core.column.CategoricalColumn
)
)
and objs[0].dtype != obj.dtype
):
dtype_mismatch = True
if is_mixed_with_object_dtype(objs[0], obj):
raise TypeError(
"cudf does not support mixed types, please type-cast "
"both series to same dtypes."
)
if dtype_mismatch:
common_dtype = find_common_type([obj.dtype for obj in objs])
objs = [obj.astype(common_dtype) for obj in objs]
col = concat_columns([o._column for o in objs])
if len(objs):
col = col._with_type_metadata(objs[0].dtype)
return cls(data=col, index=index, name=name)
@property # type: ignore
@_cudf_nvtx_annotate
def valid_count(self):
"""Number of non-null values"""
return self._column.valid_count
@property # type: ignore
@_cudf_nvtx_annotate
def null_count(self):
"""Number of null values"""
return self._column.null_count
@property # type: ignore
@_cudf_nvtx_annotate
def nullable(self):
"""A boolean indicating whether a null-mask is needed"""
return self._column.nullable
@property # type: ignore
@_cudf_nvtx_annotate
def has_nulls(self):
"""
Indicator whether Series contains null values.
Returns
-------
out : bool
If Series has at least one null value, return True, if not
return False.
Examples
--------
>>> import cudf
>>> series = cudf.Series([1, 2, None, 3, 4])
>>> series
0 1
1 2
2 <NA>
3 3
4 4
dtype: int64
>>> series.has_nulls
True
>>> series.dropna().has_nulls
False
"""
return self._column.has_nulls()
@_cudf_nvtx_annotate
def dropna(self, axis=0, inplace=False, how=None):
"""
Return a Series with null values removed.
Parameters
----------
axis : {0 or 'index'}, default 0
There is only one axis to drop values from.
inplace : bool, default False
If True, do operation inplace and return None.
how : str, optional
Not in use. Kept for compatibility.
Returns
-------
Series
Series with null entries dropped from it.
See Also
--------
Series.isna : Indicate null values.
Series.notna : Indicate non-null values.
Series.fillna : Replace null values.
cudf.DataFrame.dropna : Drop rows or columns which
contain null values.
cudf.Index.dropna : Drop null indices.
Examples
--------
>>> import cudf
>>> ser = cudf.Series([1, 2, None])
>>> ser
0 1
1 2
2 <NA>
dtype: int64
Drop null values from a Series.
>>> ser.dropna()
0 1
1 2
dtype: int64
Keep the Series with valid entries in the same variable.
>>> ser.dropna(inplace=True)
>>> ser
0 1
1 2
dtype: int64
Empty strings are not considered null values.
`None` is considered a null value.
>>> ser = cudf.Series(['', None, 'abc'])
>>> ser
0
1 <NA>
2 abc
dtype: object
>>> ser.dropna()
0
2 abc
dtype: object
"""
if axis not in (0, "index"):
raise ValueError(
"Series.dropna supports only one axis to drop values from"
)
result = super().dropna(axis=axis)
return self._mimic_inplace(result, inplace=inplace)
@_cudf_nvtx_annotate
def drop_duplicates(self, keep="first", inplace=False, ignore_index=False):
"""
Return Series with duplicate values removed.
Parameters
----------
keep : {'first', 'last', ``False``}, default 'first'
Method to handle dropping duplicates:
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
If ``True``, performs operation inplace and returns None.
Returns
-------
Series or None
Series with duplicates dropped or None if ``inplace=True``.
Examples
--------
>>> s = cudf.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
... name='animal')
>>> s
0 lama
1 cow
2 lama
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
With the `keep` parameter, the selection behavior of duplicated
values can be changed. The value 'first' keeps the first
occurrence for each set of duplicated entries.
The default value of keep is 'first'. Note that the order of
the rows being returned is not guaranteed
to be sorted.
>>> s.drop_duplicates()
0 lama
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
The value 'last' for parameter `keep` keeps the last occurrence
for each set of duplicated entries.
>>> s.drop_duplicates(keep='last')
1 cow
3 beetle
4 lama
5 hippo
Name: animal, dtype: object
The value `False` for parameter `keep` discards all sets
of duplicated entries. Setting the value of 'inplace' to
`True` performs the operation inplace and returns `None`.
>>> s.drop_duplicates(keep=False, inplace=True)
>>> s
1 cow
3 beetle
5 hippo
Name: animal, dtype: object
"""
result = super().drop_duplicates(keep=keep, ignore_index=ignore_index)
return self._mimic_inplace(result, inplace=inplace)
@_cudf_nvtx_annotate
def fillna(
self, value=None, method=None, axis=None, inplace=False, limit=None
):
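# Series and dict fill values are aligned to this Series' index and
# passed down as a column; scalar fill values are forwarded unchanged.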
if isinstance(value, pd.Series):
value = Series.from_pandas(value)
if not (is_scalar(value) or isinstance(value, (abc.Mapping, Series))):
raise TypeError(
f'"value" parameter must be a scalar, dict '
f"or Series, but you passed a "
f'"{type(value).__name__}"'
)
if isinstance(value, (abc.Mapping, Series)):
value = Series(value)
if not self.index.equals(value.index):
value = value.reindex(self.index)
value = value._column
return super().fillna(
value=value, method=method, axis=axis, inplace=inplace, limit=limit
)
def between(self, left, right, inclusive="both") -> Series:
"""
Return boolean Series equivalent to left <= series <= right.
This function returns a boolean vector containing `True` wherever the
corresponding Series element is between the boundary values `left` and
`right`. NA values are treated as `False`.
Parameters
----------
left : scalar or list-like
Left boundary.
right : scalar or list-like
Right boundary.
inclusive : {"both", "neither", "left", "right"}
Include boundaries. Whether to set each bound as closed or open.
Returns
-------
Series
Series representing whether each element is between left and
right (inclusive).
See Also
--------
Series.gt : Greater than of series and other.
Series.lt : Less than of series and other.
Notes
-----
This function is equivalent to ``(left <= ser) & (ser <= right)``
Examples
--------
>>> import cudf
>>> s = cudf.Series([2, 0, 4, 8, None])
Boundary values are included by default:
>>> s.between(1, 4)
0 True
1 False
2 True
3 False
4 <NA>
dtype: bool
With `inclusive` set to ``"neither"`` boundary values are excluded:
>>> s.between(1, 4, inclusive="neither")
0 True
1 False
2 False
3 False
4 <NA>
dtype: bool
`left` and `right` can be any scalar value:
>>> s = cudf.Series(['Alice', 'Bob', 'Carol', 'Eve'])
>>> s.between('Anna', 'Daniel')
0 False
1 True
2 True
3 False
dtype: bool
"""
left_operand = left if is_scalar(left) else as_column(left)
right_operand = right if is_scalar(right) else as_column(right)
if inclusive == "both":
lmask = self._column >= left_operand
rmask = self._column <= right_operand
elif inclusive == "left":
lmask = self._column >= left_operand
rmask = self._column < right_operand
elif inclusive == "right":
lmask = self._column > left_operand
rmask = self._column <= right_operand
elif inclusive == "neither":
lmask = self._column > left_operand
rmask = self._column < right_operand
else:
raise ValueError(
"Inclusive has to be either string of 'both', "
"'left', 'right', or 'neither'."
)
return self._from_data({self.name: lmask & rmask}, self._index)
@_cudf_nvtx_annotate
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
if bool_only not in (None, True):
raise NotImplementedError(
"The bool_only parameter is not supported for Series."
)
return super().all(axis, skipna, level, **kwargs)
@_cudf_nvtx_annotate
def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
if bool_only not in (None, True):
raise NotImplementedError(
"The bool_only parameter is not supported for Series."
)
return super().any(axis, skipna, level, **kwargs)
@_cudf_nvtx_annotate
def to_pandas(self, index=True, nullable=False, **kwargs):
"""
Convert to a Pandas Series.
Parameters
----------
index : Boolean, Default True
If ``index`` is ``True``, converts the index of cudf.Series
and sets it to the pandas.Series. If ``index`` is ``False``,
no index conversion is performed and pandas.Series will assign
a default index.
nullable : Boolean, Default False
If ``nullable`` is ``True``, the resulting series will have
a corresponding nullable Pandas dtype.
If there is no corresponding nullable Pandas dtype present,
the resulting dtype will be a regular pandas dtype.
If ``nullable`` is ``False``, the resulting series will
either convert null values to ``np.nan`` or ``None``
depending on the dtype.
Returns
-------
out : Pandas Series
Examples
--------
>>> import cudf
>>> ser = cudf.Series([-3, 2, 0])
>>> pds = ser.to_pandas()
>>> pds
0 -3
1 2
2 0
dtype: int64
>>> type(pds)
<class 'pandas.core.series.Series'>
``nullable`` parameter can be used to control
whether dtype can be Pandas Nullable or not:
>>> ser = cudf.Series([10, 20, None, 30])
>>> ser
0 10
1 20
2 <NA>
3 30
dtype: int64
>>> ser.to_pandas(nullable=True)
0 10
1 20
2 <NA>
3 30
dtype: Int64
>>> ser.to_pandas(nullable=False)
0 10.0
1 20.0
2 NaN
3 30.0
dtype: float64
"""
if index is True:
index = self.index.to_pandas()
s = self._column.to_pandas(index=index, nullable=nullable)
s.name = self.name
return s
@property # type: ignore
@_cudf_nvtx_annotate
def data(self):
"""The gpu buffer for the data
Returns
-------
out : The GPU buffer of the Series.
Examples
--------
>>> import cudf
>>> import numpy as np
>>> series = cudf.Series([1, 2, 3, 4])
>>> series
0 1
1 2
2 3
3 4
dtype: int64
>>> np.array(series.data.memoryview())
array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0, 4, 0, 0, 0, 0, 0, 0, 0], dtype=uint8)
""" # noqa: E501
return self._column.data
@property # type: ignore
@_cudf_nvtx_annotate
def nullmask(self):
"""The gpu buffer for the null-mask"""
return cudf.Series(self._column.nullmask)
@_cudf_nvtx_annotate
def astype(self, dtype, copy=False, errors="raise", **kwargs):
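# Scalar dtypes are wrapped into a ``{name: dtype}`` mapping; dict-like
# dtypes must already be keyed solely by this Series' name.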
if is_dict_like(dtype):
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for the key in Series "
"dtype mappings."
)
else:
dtype = {self.name: dtype}
return super().astype(dtype, copy, errors, **kwargs)
@_cudf_nvtx_annotate
def sort_index(self, axis=0, *args, **kwargs):
if axis not in (0, "index"):
raise ValueError("Only axis=0 is valid for Series.")
return super().sort_index(axis=axis, *args, **kwargs)
@_cudf_nvtx_annotate
def sort_values(
self,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
):
"""Sort by the values along either axis.
Parameters
----------
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of the
by.
na_position : {'first', 'last'}, default 'last'
'first' puts nulls at the beginning, 'last' puts nulls at the end
ignore_index : bool, default False
If True, index will not be sorted.
Returns
-------
Series : Series with sorted values.
Notes
-----
Difference from pandas:
* Support axis='index' only.
* Not supporting: inplace, kind
Examples
--------
>>> import cudf
>>> s = cudf.Series([1, 5, 2, 4, 3])
>>> s.sort_values()
0 1
2 2
4 3
3 4
1 5
dtype: int64
"""
return super().sort_values(
by=self.name,
axis=axis,
ascending=ascending,
inplace=inplace,
kind=kind,
na_position=na_position,
ignore_index=ignore_index,
)
@_cudf_nvtx_annotate
def nlargest(self, n=5, keep="first"):
"""Returns a new Series of the *n* largest element.
Parameters
----------
n : int, default 5
Return this many descending sorted values.
keep : {'first', 'last'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
Returns
-------
Series
The `n` largest values in the Series, sorted in decreasing order.
Examples
--------
>>> import cudf
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Malta": 434000, "Maldives": 434000,
... "Brunei": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> series = cudf.Series(countries_population)
>>> series
Italy 59000000
France 65000000
Malta 434000
Maldives 434000
Brunei 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
>>> series.nlargest()
France 65000000
Italy 59000000
Malta 434000
Maldives 434000
Brunei 434000
dtype: int64
>>> series.nlargest(3)
France 65000000
Italy 59000000
Malta 434000
dtype: int64
>>> series.nlargest(3, keep='last')
France 65000000
Italy 59000000
Brunei 434000
dtype: int64
"""
return self._n_largest_or_smallest(True, n, [self.name], keep)
@_cudf_nvtx_annotate
def nsmallest(self, n=5, keep="first"):
"""
Returns a new Series of the *n* smallest elements.
Parameters
----------
n : int, default 5
Return this many ascending sorted values.
keep : {'first', 'last'}, default 'first'
When there are duplicate values that cannot all fit in a
Series of `n` elements:
- ``first`` : return the first `n` occurrences in order
of appearance.
- ``last`` : return the last `n` occurrences in reverse
order of appearance.
Returns
-------
Series
The `n` smallest values in the Series, sorted in increasing order.
Examples
--------
>>> import cudf
>>> countries_population = {"Italy": 59000000, "France": 65000000,
... "Brunei": 434000, "Malta": 434000,
... "Maldives": 434000, "Iceland": 337000,
... "Nauru": 11300, "Tuvalu": 11300,
... "Anguilla": 11300, "Montserrat": 5200}
>>> s = cudf.Series(countries_population)
>>> s
Italy 59000000
France 65000000
Brunei 434000
Malta 434000
Maldives 434000
Iceland 337000
Nauru 11300
Tuvalu 11300
Anguilla 11300
Montserrat 5200
dtype: int64
The `n` smallest elements where ``n=5`` by default.
>>> s.nsmallest()
Montserrat 5200
Nauru 11300
Tuvalu 11300
Anguilla 11300
Iceland 337000
dtype: int64
The `n` smallest elements where ``n=3``. Default `keep` value is
'first' so Nauru and Tuvalu will be kept.
>>> s.nsmallest(3)
Montserrat 5200
Nauru 11300
Tuvalu 11300
dtype: int64
The `n` smallest elements where ``n=3`` and keeping the last
duplicates. Anguilla and Tuvalu will be kept since they are the last
with value 11300 based on the index order.
>>> s.nsmallest(3, keep='last')
Montserrat 5200
Anguilla 11300
Tuvalu 11300
dtype: int64
"""
return self._n_largest_or_smallest(False, n, [self.name], keep)
@_cudf_nvtx_annotate
def argsort(
self,
axis=0,
kind="quicksort",
order=None,
ascending=True,
na_position="last",
):
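# Delegate to the Frame-level argsort and rewrap the resulting positions
# in a Series that carries over this Series' name.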
obj = self.__class__._from_data(
{
None: super().argsort(
axis=axis,
kind=kind,
order=order,
ascending=ascending,
na_position=na_position,
)
}
)
obj.name = self.name
return obj
@_cudf_nvtx_annotate
def replace(self, to_replace=None, value=None, *args, **kwargs):
if is_dict_like(to_replace) and value is not None:
raise ValueError(
"Series.replace cannot use dict-like to_replace and non-None "
"value"
)
return super().replace(to_replace, value, *args, **kwargs)
@_cudf_nvtx_annotate
def update(self, other):
"""
Modify Series in place using values from passed Series.
Uses non-NA values from passed Series to make updates. Aligns
on index.
Parameters
----------
other : Series, or object coercible into Series
Examples
--------
>>> import cudf
>>> import numpy as np
>>> s = cudf.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.update(cudf.Series([4, 5, 6]))
>>> s
0 4
1 5
2 6
dtype: int64
>>> s = cudf.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
>>> s.update(cudf.Series(['d', 'e'], index=[0, 2]))
>>> s
0 d
1 b
2 e
dtype: object
>>> s = cudf.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.update(cudf.Series([4, 5, 6, 7, 8]))
>>> s
0 4
1 5
2 6
dtype: int64
If ``other`` contains NaNs the corresponding values are not updated
in the original Series.
>>> s = cudf.Series([1.0, 2.0, 3.0])
>>> s
0 1.0
1 2.0
2 3.0
dtype: float64
>>> s.update(cudf.Series([4.0, np.nan, 6.0], nan_as_null=False))
>>> s
0 4.0
1 2.0
2 6.0
dtype: float64
``other`` can also be a non-Series object type
that is coercible into a Series
>>> s = cudf.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.update([4, np.nan, 6])
>>> s
0 4
1 2
2 6
dtype: int64
>>> s = cudf.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.update({1: 9})
>>> s
0 1
1 9
2 3
dtype: int64
"""
if not isinstance(other, cudf.Series):
other = cudf.Series(other)
if not self.index.equals(other.index):
other = other.reindex(index=self.index)
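# Only positions where ``other`` is non-null are overwritten; nulls in
# ``other`` leave the original values untouched.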
mask = other.notna()
self.mask(mask, other, inplace=True)
# UDF related
@_cudf_nvtx_annotate
def apply(self, func, convert_dtype=True, args=(), **kwargs):
"""
Apply a scalar function to the values of a Series.
Similar to ``pandas.Series.apply``.
``apply`` relies on Numba to JIT compile ``func``.
Thus the allowed operations within ``func`` are limited to `those
supported by the CUDA Python Numba target
<https://numba.readthedocs.io/en/stable/cuda/cudapysupported.html>`__.
For more information, see the `cuDF guide to user defined functions
<https://docs.rapids.ai/api/cudf/stable/user_guide/guide-to-udfs.html>`__.
Some string functions and methods are supported. Refer to the guide
to UDFs for details.
Parameters
----------
func : function
Scalar Python function to apply.
convert_dtype : bool, default True
In cuDF, this parameter is always True. Because
cuDF does not support arbitrary object dtypes,
the result will always be the common type as determined
by numba based on the function logic and argument types.
See examples for details.
args : tuple
Positional arguments passed to func after the series value.
**kwargs
Not supported
Returns
-------
result : Series
The mask and index are preserved.
Notes
-----
UDFs are cached in memory to avoid recompilation. The first
call to the UDF will incur compilation overhead. `func` may
call nested functions only if they are decorated with
`numba.cuda.jit(device=True)`; otherwise numba will raise a
typing error.
Examples
--------
Apply a basic function to a series:
>>> import cudf
>>> sr = cudf.Series([1,2,3])
>>> def f(x):
... return x + 1
>>> sr.apply(f)
0 2
1 3
2 4
dtype: int64
Apply a basic function to a series with nulls:
>>> sr = cudf.Series([1,cudf.NA,3])
>>> def f(x):
... return x + 1
>>> sr.apply(f)
0 2
1 <NA>
2 4
dtype: int64
Use a function that does something conditionally,
based on if the value is or is not null:
>>> sr = cudf.Series([1,cudf.NA,3])
>>> def f(x):
... if x is cudf.NA:
... return 42
... else:
... return x - 1
>>> sr.apply(f)
0 0
1 42
2 2
dtype: int64
Results will be upcast to the common dtype required
by the UDF's logic. Note that this means the common
type will be returned even if the input data would
not produce any values of that dtype:
>>> sr = cudf.Series([1,cudf.NA,3])
>>> def f(x):
... return x + 1.5
>>> sr.apply(f)
0 2.5
1 <NA>
2 4.5
dtype: float64
UDFs manipulating string data are allowed, as long as
they neither modify strings in place nor create new strings.
For example, the following UDF is allowed:
>>> def f(st):
... if len(st) == 0:
... return -1
... elif st.startswith('a'):
... return 1
... elif 'example' in st:
... return 2
... else:
... return 3
...
>>> sr = cudf.Series(['', 'abc', 'some_example'])
>>> sr.apply(f) # doctest: +SKIP
0 -1
1 1
2 2
dtype: int64
However, the following UDF is not allowed since it includes an
operation that requires the creation of a new string: a call to the
``upper`` method. Methods that are not supported in this manner
will raise an ``AttributeError``.
>>> def f(st):
... new = st.upper()
... return 'ABC' in new
...
>>> sr.apply(f) # doctest: +SKIP
For a complete list of supported functions and methods that may be
used to manipulate string data, see the UDF guide,
<https://docs.rapids.ai/api/cudf/stable/user_guide/guide-to-udfs.html>
"""
if convert_dtype is not True:
raise ValueError("Series.apply only supports convert_dtype=True")
result = self._apply(func, _get_scalar_kernel, *args, **kwargs)
result.name = self.name
return result
#
# Stats
#
@_cudf_nvtx_annotate
def count(self, level=None):
"""
Return number of non-NA/null observations in the Series
Returns
-------
int
Number of non-null values in the Series.
Notes
-----
The `level` parameter is currently not supported.
Examples
--------
>>> import cudf
>>> ser = cudf.Series([1, 5, 2, 4, 3])
>>> ser.count()
5
"""
if level is not None:
raise NotImplementedError("level parameter is not implemented yet")
return self.valid_count
@_cudf_nvtx_annotate
def mode(self, dropna=True):
"""
Return the mode(s) of the dataset.
Always returns Series even if only one value is returned.
Parameters
----------
dropna : bool, default True
Don't consider counts of NA/NaN/NaT.
Returns
-------
Series
Modes of the Series in sorted order.
Examples
--------
>>> import cudf
>>> series = cudf.Series([7, 6, 5, 4, 3, 2, 1])
>>> series
0 7
1 6
2 5
3 4
4 3
5 2
6 1
dtype: int64
>>> series.mode()
0 1
1 2
2 3
3 4
4 5
5 6
6 7
dtype: int64
We can include ``<NA>`` values in mode by
passing ``dropna=False``.
>>> series = cudf.Series([7, 4, 3, 3, 7, None, None])
>>> series
0 7
1 4
2 3
3 3
4 7
5 <NA>
6 <NA>
dtype: int64
>>> series.mode()
0 3
1 7
dtype: int64
>>> series.mode(dropna=False)
0 3
1 7
2 <NA>
dtype: int64
"""
val_counts = self.value_counts(ascending=False, dropna=dropna)
if len(val_counts) > 0:
val_counts = val_counts[val_counts == val_counts.iloc[0]]
return Series._from_data(
{self.name: val_counts.index.sort_values()}, name=self.name
)
@_cudf_nvtx_annotate
def round(self, decimals=0, how="half_even"):
if not is_integer(decimals):
raise ValueError(
f"decimals must be an int, got {type(decimals).__name__}"
)
decimals = int(decimals)
return super().round(decimals, how)
@_cudf_nvtx_annotate
def cov(self, other, min_periods=None):
"""
Compute covariance with Series, excluding missing values.
Parameters
----------
other : Series
Series with which to compute the covariance.
Returns
-------
float
Covariance between Series and other normalized by N-1
(unbiased estimator).
Notes
-----
`min_periods` parameter is not yet supported.
Examples
--------
>>> import cudf
>>> ser1 = cudf.Series([0.9, 0.13, 0.62])
>>> ser2 = cudf.Series([0.12, 0.26, 0.51])
>>> ser1.cov(ser2)
-0.015750000000000004
"""
if min_periods is not None:
raise NotImplementedError(
"min_periods parameter is not implemented yet"
)
if self.empty or other.empty:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
lhs = self.nans_to_nulls().dropna()
rhs = other.nans_to_nulls().dropna()
lhs, rhs = _align_indices([lhs, rhs], how="inner")
try:
return lhs._column.cov(rhs._column)
except AttributeError:
raise TypeError(
f"cannot perform covariance with types {self.dtype}, "
f"{other.dtype}"
)
@_cudf_nvtx_annotate
def transpose(self):
"""Return the transpose, which is by definition self."""
return self
T = property(transpose, doc=transpose.__doc__)
@_cudf_nvtx_annotate
def duplicated(self, keep="first"):
"""
Indicate duplicate Series values.
Duplicated values are indicated as ``True`` values in the resulting
Series. Either all duplicates, all except the first or all except the
last occurrence of duplicates can be indicated.
Parameters
----------
keep : {'first', 'last', False}, default 'first'
Method to handle dropping duplicates:
- ``'first'`` : Mark duplicates as ``True`` except for the first
occurrence.
- ``'last'`` : Mark duplicates as ``True`` except for the last
occurrence.
- ``False`` : Mark all duplicates as ``True``.
Returns
-------
Series[bool]
Series indicating whether each value has occurred in the
preceding values.
See Also
--------
Index.duplicated : Equivalent method on cudf.Index.
DataFrame.duplicated : Equivalent method on cudf.DataFrame.
Series.drop_duplicates : Remove duplicate values from Series.
Examples
--------
By default, for each set of duplicated values, the first occurrence is
set on False and all others on True:
>>> import cudf
>>> animals = cudf.Series(['lama', 'cow', 'lama', 'beetle', 'lama'])
>>> animals.duplicated()
0 False
1 False
2 True
3 False
4 True
dtype: bool
which is equivalent to
>>> animals.duplicated(keep='first')
0 False
1 False
2 True
3 False
4 True
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set on False and all others on True:
>>> animals.duplicated(keep='last')
0 True
1 False
2 True
3 False
4 False
dtype: bool
By setting keep on ``False``, all duplicates are True:
>>> animals.duplicated(keep=False)
0 True
1 False
2 True
3 False
4 True
dtype: bool
"""
return super().duplicated(keep=keep)
@_cudf_nvtx_annotate
def corr(self, other, method="pearson", min_periods=None):
"""Calculates the sample correlation between two Series,
excluding missing values.
Parameters
----------
other : Series
Series with which to compute the correlation.
method : {'pearson', 'spearman'}, default 'pearson'
Method used to compute correlation:
- pearson : Standard correlation coefficient
- spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations needed to have a valid result.
Examples
--------
>>> import cudf
>>> ser1 = cudf.Series([0.9, 0.13, 0.62])
>>> ser2 = cudf.Series([0.12, 0.26, 0.51])
>>> ser1.corr(ser2, method="pearson")
-0.20454263717316112
>>> ser1.corr(ser2, method="spearman")
-0.5
"""
if method not in {"pearson", "spearman"}:
raise ValueError(f"Unknown method {method}")
if min_periods is not None:
raise NotImplementedError("Unsupported argument 'min_periods'")
if self.empty or other.empty:
return cudf.utils.dtypes._get_nan_for_dtype(self.dtype)
lhs = self.nans_to_nulls().dropna()
rhs = other.nans_to_nulls().dropna()
lhs, rhs = _align_indices([lhs, rhs], how="inner")
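# Spearman correlation is the Pearson correlation of the ranked values.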
if method == "spearman":
lhs = lhs.rank()
rhs = rhs.rank()
try:
return lhs._column.corr(rhs._column)
except AttributeError:
raise TypeError(
f"cannot perform corr with types {self.dtype}, {other.dtype}"
)
@_cudf_nvtx_annotate
def autocorr(self, lag=1):
"""Compute the lag-N autocorrelation. This method computes the Pearson
correlation between the Series and its shifted self.
Parameters
----------
lag : int, default 1
Number of lags to apply before performing autocorrelation.
Returns
-------
result : float
The Pearson correlation between self and self.shift(lag).
Examples
--------
>>> import cudf
>>> s = cudf.Series([0.25, 0.5, 0.2, -0.05, 0.17])
>>> s.autocorr()
0.1438853844...
>>> s.autocorr(lag=2)
-0.9647548490...
"""
return self.corr(self.shift(lag))
@_cudf_nvtx_annotate
def isin(self, values):
"""Check whether values are contained in Series.
Parameters
----------
values : set or list-like
The sequence of values to test. Passing in a single string will
raise a TypeError. Instead, turn a single string into a list
of one element.
Returns
-------
result : Series
Series of booleans indicating if each element is in values.
Raises
------
TypeError
If values is a string
Examples
--------
>>> import cudf
>>> s = cudf.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
... 'hippo'], name='animal')
>>> s.isin(['cow', 'lama'])
0 True
1 True
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Passing a single string as ``s.isin('lama')`` will raise an error. Use
a list of one element instead:
>>> s.isin(['lama'])
0 True
1 False
2 True
3 False
4 True
5 False
Name: animal, dtype: bool
Strings and integers are distinct and are therefore not comparable:
>>> cudf.Series([1]).isin(['1'])
0 False
dtype: bool
>>> cudf.Series([1.1]).isin(['1.1'])
0 False
dtype: bool
"""
# Even though only list-like objects are supposed to be passed, only
# scalars throw errors. Other types (like dicts) just transparently
# return False (see the implementation of ColumnBase.isin).
if is_scalar(values):
raise TypeError(
"only list-like objects are allowed to be passed "
f"to isin(), you passed a [{type(values).__name__}]"
)
return Series._from_data(
{self.name: self._column.isin(values)}, index=self.index
)
@_cudf_nvtx_annotate
def unique(self):
"""
Returns unique values of this Series.
Returns
-------
Series
A series with only the unique values.
Examples
--------
>>> import cudf
>>> series = cudf.Series(['a', 'a', 'b', None, 'b', None, 'c'])
>>> series
0 a
1 a
2 b
3 <NA>
4 b
5 <NA>
6 c
dtype: object
>>> series.unique()
0 a
1 b
2 <NA>
3 c
dtype: object
"""
res = self._column.unique()
if cudf.get_option("mode.pandas_compatible"):
return res.values
return Series(res, name=self.name)
@_cudf_nvtx_annotate
def value_counts(
self,
normalize=False,
sort=True,
ascending=False,
bins=None,
dropna=True,
):
"""Return a Series containing counts of unique values.
The resulting object will be in descending order so that
the first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : bool, default False
If True then the object returned will contain
the relative frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int, optional
Rather than count values, group them into half-open bins.
Only works with numeric data.
dropna : bool, default True
Don't include counts of NaN and None.
Returns
-------
result : Series containing counts of unique values.
See Also
--------
Series.count
Number of non-NA elements in a Series.
cudf.DataFrame.count
Number of non-NA elements in a DataFrame.
Examples
--------
>>> import cudf
>>> import numpy as np
>>> sr = cudf.Series([1.0, 2.0, 2.0, 3.0, 3.0, 3.0, None])
>>> sr
0 1.0
1 2.0
2 2.0
3 3.0
4 3.0
5 3.0
6 <NA>
dtype: float64
>>> sr.value_counts()
3.0 3
2.0 2
1.0 1
dtype: int64
The order of the counts can be changed by passing ``ascending=True``:
>>> sr.value_counts(ascending=True)
1.0 1
2.0 2
3.0 3
dtype: int64
With ``normalize`` set to True, returns the relative frequency
by dividing all values by the sum of values.
>>> sr.value_counts(normalize=True)
3.0 0.500000
2.0 0.333333
1.0 0.166667
dtype: float64
To include ``NA`` value counts, pass ``dropna=False``:
>>> sr = cudf.Series([1.0, 2.0, 2.0, 3.0, None, 3.0, 3.0, None])
>>> sr
0 1.0
1 2.0
2 2.0
3 3.0
4 <NA>
5 3.0
6 3.0
7 <NA>
dtype: float64
>>> sr.value_counts(dropna=False)
3.0 3
2.0 2
<NA> 2
1.0 1
dtype: int64
>>> s = cudf.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
"""
if bins is not None:
series_bins = cudf.cut(self, bins, include_lowest=True)
if dropna and self.null_count == len(self):
return Series(
[],
dtype=np.int64,
name=self.name,
index=cudf.Index([], dtype=self.dtype),
)
if bins is not None:
res = self.groupby(series_bins, dropna=dropna).count(dropna=dropna)
res = res[res.index.notna()]
else:
res = self.groupby(self, dropna=dropna).count(dropna=dropna)
if isinstance(self.dtype, cudf.CategoricalDtype) and len(
res
) != len(self.dtype.categories):
# For categorical dtypes: When there exists
# categories in dtypes and they are missing in the
# column, `value_counts` will have to return
# their occurrences as 0.
# TODO: Remove this workaround once `observed`
# parameter support is added to `groupby`
res = res.reindex(self.dtype.categories).fillna(0)
res._index = res._index.astype(self.dtype)
res.index.name = None
if sort:
res = res.sort_values(ascending=ascending)
if normalize:
res = res / float(res._column.sum())
# Pandas returns an IntervalIndex as the index of res
# this condition makes sure we do too if bins is given
if bins is not None and len(res) == len(res.index.categories):
int_index = IntervalColumn.as_interval_column(
res.index._column, res.index.categories.dtype
)
res.index = int_index
return res
@_cudf_nvtx_annotate
def quantile(
self, q=0.5, interpolation="linear", exact=True, quant_index=True
):
"""
Return values at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points i and j:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
exact : boolean
Whether to use approximate or exact quantile algorithm.
quant_index : boolean
Whether to use the list of quantiles as index.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
Examples
--------
>>> import cudf
>>> series = cudf.Series([1, 2, 3, 4])
>>> series
0 1
1 2
2 3
3 4
dtype: int64
>>> series.quantile(0.5)
2.5
>>> series.quantile([0.25, 0.5, 0.75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
return_scalar = is_scalar(q)
if return_scalar:
np_array_q = np.asarray([float(q)])
else:
try:
np_array_q = np.asarray(q)
except TypeError:
try:
np_array_q = cudf.core.column.as_column(q).values_host
except TypeError:
raise TypeError(
f"q must be a scalar or array-like, got {type(q)}"
)
result = self._column.quantile(
np_array_q, interpolation, exact, return_scalar=return_scalar
)
if return_scalar:
return result
return Series._from_data(
data={self.name: result},
index=as_index(np_array_q) if quant_index else None,
)
@docutils.doc_describe()
@_cudf_nvtx_annotate
def describe(
self,
percentiles=None,
include=None,
exclude=None,
datetime_is_numeric=False,
):
"""{docstring}"""
if not datetime_is_numeric:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"`datetime_is_numeric` is deprecated and will be removed in "
"a future release. Specify `datetime_is_numeric=True` to "
"silence this warning and adopt the future behavior now.",
FutureWarning,
)
if percentiles is not None:
if not all(0 <= x <= 1 for x in percentiles):
raise ValueError(
"All percentiles must be between 0 and 1, " "inclusive."
)
# describe always includes 50th percentile
percentiles = list(percentiles)
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.sort(percentiles)
else:
# pandas defaults
percentiles = np.array([0.25, 0.5, 0.75])
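# Dispatch to a type-specific describe helper; non-numeric summaries are
# returned as strings so heterogeneous rows can share one output column.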
dtype = "str"
if is_bool_dtype(self.dtype):
data = _describe_categorical(self, percentiles)
elif isinstance(self._column, cudf.core.column.NumericalColumn):
data = _describe_numeric(self, percentiles)
dtype = None
elif isinstance(self._column, TimeDeltaColumn):
data = _describe_timedelta(self, percentiles)
elif isinstance(self._column, DatetimeColumn):
data = _describe_timestamp(self, percentiles)
else:
data = _describe_categorical(self, percentiles)
return Series(
data=data.values(),
index=data.keys(),
dtype=dtype,
nan_as_null=False,
name=self.name,
)
@_cudf_nvtx_annotate
def digitize(self, bins, right=False):
"""Return the indices of the bins to which each value belongs.
Notes
-----
Monotonicity of bins is assumed and not checked.
Parameters
----------
bins : np.array
1-D monotonically increasing array with the same type as this series.
right : bool
Indicates whether interval contains the right or left bin edge.
Returns
-------
A new Series containing the indices.
Examples
--------
>>> import cudf
>>> s = cudf.Series([0.2, 6.4, 3.0, 1.6])
>>> bins = cudf.Series([0.0, 1.0, 2.5, 4.0, 10.0])
>>> inds = s.digitize(bins)
>>> inds
0 1
1 4
2 3
3 2
dtype: int32
"""
return Series(
cudf.core.column.numerical.digitize(self._column, bins, right)
)
@_cudf_nvtx_annotate
def diff(self, periods=1):
"""First discrete difference of element.
Calculates the difference of a Series element compared with another
element in the Series (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference,
accepts negative values.
Returns
-------
Series
First differences of the Series.
Examples
--------
>>> import cudf
>>> series = cudf.Series([1, 1, 2, 3, 5, 8])
>>> series
0 1
1 1
2 2
3 3
4 5
5 8
dtype: int64
Difference with previous row
>>> series.diff()
0 <NA>
1 0
2 1
3 1
4 2
5 3
dtype: int64
Difference with 3rd previous row
>>> series.diff(periods=3)
0 <NA>
1 <NA>
2 <NA>
3 2
4 4
5 6
dtype: int64
Difference with following row
>>> series.diff(periods=-1)
0 0
1 -1
2 -1
3 -2
4 -3
5 <NA>
dtype: int64
"""
if not is_integer(periods):
if not (is_float(periods) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
return self - self.shift(periods=periods)
@_cudf_nvtx_annotate
@docutils.doc_apply(
groupby_doc_template.format(
ret=textwrap.dedent(
"""
Returns
-------
SeriesGroupBy
Returns a SeriesGroupBy object that contains
information about the groups.
"""
)
)
)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=no_default,
group_keys=False,
squeeze=False,
observed=True,
dropna=True,
):
return super().groupby(
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
observed,
dropna,
)
@_cudf_nvtx_annotate
def rename(self, index=None, copy=True):
"""
Alter Series name
Change Series.name with a scalar value
Parameters
----------
index : Scalar, optional
Scalar to alter the Series.name attribute
copy : boolean, default True
Also copy underlying data
Returns
-------
Series
Notes
-----
Difference from pandas:
- Supports scalar values only for changing name attribute
- Not supporting: inplace, level
Examples
--------
>>> import cudf
>>> series = cudf.Series([10, 20, 30])
>>> series
0 10
1 20
2 30
dtype: int64
>>> series.name
>>> renamed_series = series.rename('numeric_series')
>>> renamed_series
0 10
1 20
2 30
Name: numeric_series, dtype: int64
>>> renamed_series.name
'numeric_series'
"""
out_data = self._data.copy(deep=copy)
return Series._from_data(out_data, self.index, name=index)
@_cudf_nvtx_annotate
def add_prefix(self, prefix):
return Series._from_data(
data=self._data.copy(deep=True),
index=prefix + self.index.astype(str),
)
@_cudf_nvtx_annotate
def add_suffix(self, suffix):
return Series._from_data(
data=self._data.copy(deep=True),
index=self.index.astype(str) + suffix,
)
@_cudf_nvtx_annotate
def keys(self):
"""
Return alias for index.
Returns
-------
Index
Index of the Series.
Examples
--------
>>> import cudf
>>> sr = cudf.Series([10, 11, 12, 13, 14, 15])
>>> sr
0 10
1 11
2 12
3 13
4 14
5 15
dtype: int64
>>> sr.keys()
RangeIndex(start=0, stop=6, step=1)
>>> sr = cudf.Series(['a', 'b', 'c'])
>>> sr
0 a
1 b
2 c
dtype: object
>>> sr.keys()
RangeIndex(start=0, stop=3, step=1)
>>> sr = cudf.Series([1, 2, 3], index=['a', 'b', 'c'])
>>> sr
a 1
b 2
c 3
dtype: int64
>>> sr.keys()
StringIndex(['a' 'b' 'c'], dtype='object')
"""
return self.index
@_cudf_nvtx_annotate
def explode(self, ignore_index=False):
"""
Transform each element of a list-like to a row, replicating index
values.
Parameters
----------
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> s = cudf.Series([[1, 2, 3], [], None, [4, 5]])
>>> s
0 [1, 2, 3]
1 []
2 None
3 [4, 5]
dtype: list
>>> s.explode()
0 1
0 2
0 3
1 <NA>
2 <NA>
3 4
3 5
dtype: int64
"""
return super()._explode(self.name, ignore_index)
@_cudf_nvtx_annotate
def pct_change(
self, periods=1, fill_method="ffill", limit=None, freq=None
):
"""
Calculates the percent change between sequential elements
in the Series.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'ffill'
How to handle NAs before computing percent changes.
limit : int, optional
The number of consecutive NAs to fill before stopping.
Not yet implemented.
freq : str, optional
Increment to use from time series API.
Not yet implemented.
Returns
-------
Series
"""
if limit is not None:
raise NotImplementedError("limit parameter not supported yet.")
if freq is not None:
raise NotImplementedError("freq parameter not supported yet.")
elif fill_method not in {"ffill", "pad", "bfill", "backfill"}:
raise ValueError(
"fill_method must be one of 'ffill', 'pad', "
"'bfill', or 'backfill'."
)
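# Percent change = diff(periods) / shift(periods) after filling NAs with
# the requested fill method.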
data = self.fillna(method=fill_method, limit=limit)
diff = data.diff(periods=periods)
change = diff / data.shift(periods=periods, freq=freq)
return change
@_cudf_nvtx_annotate
def where(self, cond, other=None, inplace=False):
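# Delegate masking to the base implementation and rewrap the result
# column under this Series' name.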
result_col = super().where(cond, other, inplace)
return self._mimic_inplace(
self._from_data_like_self({self.name: result_col}),
inplace=inplace,
)
def make_binop_func(op):
# This function is used to wrap binary operations in Frame with an
# appropriate API for Series as required for pandas compatibility. The
# main effect is reordering and error-checking parameters in
# Series-specific ways.
wrapped_func = getattr(IndexedFrame, op)
@functools.wraps(wrapped_func)
def wrapper(self, other, level=None, fill_value=None, axis=0):
if axis != 0:
raise NotImplementedError("Only axis=0 supported at this time.")
return wrapped_func(self, other, axis, level, fill_value)
# functools.wraps copies module level attributes to `wrapper` and sets
# __wrapped__ attributes to `wrapped_func`. Cpython looks up the signature
# string of a function by recursively delving into __wrapped__ until
# it hits the first function that has __signature__ attribute set. To make
# the signature string of `wrapper` matches with its actual parameter list,
# we directly set the __signature__ attribute of `wrapper` below.
new_sig = inspect.signature(
lambda self, other, level=None, fill_value=None, axis=0: None
)
wrapper.__signature__ = new_sig
return wrapper
# Wrap all Frame binop functions with the expected API for Series.
for binop in (
"add",
"radd",
"subtract",
"sub",
"rsub",
"multiply",
"mul",
"rmul",
"mod",
"rmod",
"pow",
"rpow",
"floordiv",
"rfloordiv",
"truediv",
"div",
"divide",
"rtruediv",
"rdiv",
"eq",
"ne",
"lt",
"le",
"gt",
"ge",
):
setattr(Series, binop, make_binop_func(binop))
class DatetimeProperties:
"""
Accessor object for datetimelike properties of the Series values.
Returns
-------
Returns a Series indexed like the original Series.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> seconds_series = cudf.Series(pd.date_range("2000-01-01", periods=3,
... freq="s"))
>>> seconds_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[ns]
>>> seconds_series.dt.second
0 0
1 1
2 2
dtype: int16
>>> hours_series = cudf.Series(pd.date_range("2000-01-01", periods=3,
... freq="h"))
>>> hours_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[ns]
>>> hours_series.dt.hour
0 0
1 1
2 2
dtype: int16
>>> weekday_series = cudf.Series(pd.date_range("2000-01-01", periods=3,
... freq="q"))
>>> weekday_series
0 2000-03-31
1 2000-06-30
2 2000-09-30
dtype: datetime64[ns]
>>> weekday_series.dt.weekday
0 4
1 4
2 5
dtype: int16
"""
def __init__(self, series):
self.series = series
@property # type: ignore
@_cudf_nvtx_annotate
def year(self):
"""
The year of the datetime.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> datetime_series = cudf.Series(pd.date_range("2000-01-01",
... periods=3, freq="Y"))
>>> datetime_series
0 2000-12-31
1 2001-12-31
2 2002-12-31
dtype: datetime64[ns]
>>> datetime_series.dt.year
0 2000
1 2001
2 2002
dtype: int16
"""
return self._get_dt_field("year")
@property # type: ignore
@_cudf_nvtx_annotate
def month(self):
"""
The month as January=1, December=12.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range("2000-01-01",
... periods=3, freq="M"))
>>> datetime_series
0 2000-01-31
1 2000-02-29
2 2000-03-31
dtype: datetime64[ns]
>>> datetime_series.dt.month
0 1
1 2
2 3
dtype: int16
"""
return self._get_dt_field("month")
@property # type: ignore
@_cudf_nvtx_annotate
def day(self):
"""
The day of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range("2000-01-01",
... periods=3, freq="D"))
>>> datetime_series
0 2000-01-01
1 2000-01-02
2 2000-01-03
dtype: datetime64[ns]
>>> datetime_series.dt.day
0 1
1 2
2 3
dtype: int16
"""
return self._get_dt_field("day")
@property # type: ignore
@_cudf_nvtx_annotate
def hour(self):
"""
The hours of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range("2000-01-01",
... periods=3, freq="h"))
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 01:00:00
2 2000-01-01 02:00:00
dtype: datetime64[ns]
>>> datetime_series.dt.hour
0 0
1 1
2 2
dtype: int16
"""
return self._get_dt_field("hour")
@property # type: ignore
@_cudf_nvtx_annotate
def minute(self):
"""
The minutes of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range("2000-01-01",
... periods=3, freq="T"))
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:01:00
2 2000-01-01 00:02:00
dtype: datetime64[ns]
>>> datetime_series.dt.minute
0 0
1 1
2 2
dtype: int16
"""
return self._get_dt_field("minute")
@property # type: ignore
@_cudf_nvtx_annotate
def second(self):
"""
The seconds of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range("2000-01-01",
... periods=3, freq="s"))
>>> datetime_series
0 2000-01-01 00:00:00
1 2000-01-01 00:00:01
2 2000-01-01 00:00:02
dtype: datetime64[ns]
>>> datetime_series.dt.second
0 0
1 1
2 2
dtype: int16
"""
return self._get_dt_field("second")
@property # type: ignore
@_cudf_nvtx_annotate
def microsecond(self):
"""
The microseconds of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range("2000-01-01",
... periods=3, freq="us"))
>>> datetime_series
0 2000-01-01 00:00:00.000000
1 2000-01-01 00:00:00.000001
2 2000-01-01 00:00:00.000002
dtype: datetime64[ns]
>>> datetime_series.dt.microsecond
0 0
1 1
2 2
dtype: int32
"""
return Series(
data=(
# Need to manually promote column to int32 because
# pandas-matching binop behaviour requires that this
# __mul__ returns an int32 column.
self.series._column.get_dt_field("millisecond").astype("int32")
* cudf.Scalar(1000, dtype="int32")
)
+ self.series._column.get_dt_field("microsecond"),
index=self.series._index,
name=self.series.name,
)
@property # type: ignore
@_cudf_nvtx_annotate
def nanosecond(self):
"""
The nanoseconds of the datetime.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range("2000-01-01",
... periods=3, freq="ns"))
>>> datetime_series
0 2000-01-01 00:00:00.000000000
1 2000-01-01 00:00:00.000000001
2 2000-01-01 00:00:00.000000002
dtype: datetime64[ns]
>>> datetime_series.dt.nanosecond
0 0
1 1
2 2
dtype: int16
"""
return self._get_dt_field("nanosecond")
@property # type: ignore
@_cudf_nvtx_annotate
def weekday(self):
"""
The day of the week with Monday=0, Sunday=6.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range('2016-12-31',
... '2017-01-08', freq='D'))
>>> datetime_series
0 2016-12-31
1 2017-01-01
2 2017-01-02
3 2017-01-03
4 2017-01-04
5 2017-01-05
6 2017-01-06
7 2017-01-07
8 2017-01-08
dtype: datetime64[ns]
>>> datetime_series.dt.weekday
0 5
1 6
2 0
3 1
4 2
5 3
6 4
7 5
8 6
dtype: int16
"""
return self._get_dt_field("weekday")
@property # type: ignore
@_cudf_nvtx_annotate
def dayofweek(self):
"""
The day of the week with Monday=0, Sunday=6.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range('2016-12-31',
... '2017-01-08', freq='D'))
>>> datetime_series
0 2016-12-31
1 2017-01-01
2 2017-01-02
3 2017-01-03
4 2017-01-04
5 2017-01-05
6 2017-01-06
7 2017-01-07
8 2017-01-08
dtype: datetime64[ns]
>>> datetime_series.dt.dayofweek
0 5
1 6
2 0
3 1
4 2
5 3
6 4
7 5
8 6
dtype: int16
"""
return self._get_dt_field("weekday")
@property # type: ignore
@_cudf_nvtx_annotate
def dayofyear(self):
"""
The day of the year, from 1-365 in non-leap years and
from 1-366 in leap years.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range('2016-12-31',
... '2017-01-08', freq='D'))
>>> datetime_series
0 2016-12-31
1 2017-01-01
2 2017-01-02
3 2017-01-03
4 2017-01-04
5 2017-01-05
6 2017-01-06
7 2017-01-07
8 2017-01-08
dtype: datetime64[ns]
>>> datetime_series.dt.dayofyear
0 366
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
dtype: int16
"""
return self._get_dt_field("day_of_year")
@property # type: ignore
@_cudf_nvtx_annotate
def day_of_year(self):
"""
The day of the year, from 1-365 in non-leap years and
from 1-366 in leap years.
Examples
--------
>>> import pandas as pd
>>> import cudf
>>> datetime_series = cudf.Series(pd.date_range('2016-12-31',
... '2017-01-08', freq='D'))
>>> datetime_series
0 2016-12-31
1 2017-01-01
2 2017-01-02
3 2017-01-03
4 2017-01-04
5 2017-01-05
6 2017-01-06
7 2017-01-07
8 2017-01-08
dtype: datetime64[ns]
>>> datetime_series.dt.day_of_year
0 366
1 1
2 2
3 3
4 4
5 5
6 6
7 7
8 8
dtype: int16
"""
return self._get_dt_field("day_of_year")
@property # type: ignore
@_cudf_nvtx_annotate
def is_leap_year(self):
"""
Boolean indicator if the date belongs to a leap year.
A leap year is a year, which has 366 days (instead of 365) including
29th of February as an intercalary day. Leap years are years which are
multiples of four with the exception of years divisible by 100 but not
by 400.
Returns
-------
Series
Booleans indicating if dates belong to a leap year.
Examples
--------
>>> import pandas as pd, cudf
>>> s = cudf.Series(
... pd.date_range(start='2000-02-01', end='2013-02-01', freq='1Y'))
>>> s
0 2000-12-31
1 2001-12-31
2 2002-12-31
3 2003-12-31
4 2004-12-31
5 2005-12-31
6 2006-12-31
7 2007-12-31
8 2008-12-31
9 2009-12-31
10 2010-12-31
11 2011-12-31
12 2012-12-31
dtype: datetime64[ns]
>>> s.dt.is_leap_year
0 True
1 False
2 False
3 False
4 True
5 False
6 False
7 False
8 True
9 False
10 False
11 False
12 True
dtype: bool
"""
res = libcudf.datetime.is_leap_year(self.series._column).fillna(False)
return Series._from_data(
ColumnAccessor({None: res}),
index=self.series._index,
name=self.series.name,
)
@property # type: ignore
@_cudf_nvtx_annotate
def quarter(self):
"""
Integer indicator for which quarter of the year the date belongs in.
There are 4 quarters in a year, with the first quarter being
January - March, the second being April - June, the third being
July - September and the fourth being October - December.
Returns
-------
Series
Integer indicating which quarter the date belongs to.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["2020-05-31 08:00:00","1999-12-31 18:40:00"],
... dtype="datetime64[ms]")
>>> s.dt.quarter
0 2
1 4
dtype: int8
"""
res = libcudf.datetime.extract_quarter(self.series._column).astype(
np.int8
)
return Series._from_data(
{None: res},
index=self.series._index,
name=self.series.name,
)
@_cudf_nvtx_annotate
def isocalendar(self):
"""
Returns a DataFrame with the year, week, and day
calculated according to the ISO 8601 standard.
Returns
-------
DataFrame
with columns year, week and day
Examples
--------
>>> import pandas as pd, cudf
>>> ser = cudf.Series(pd.date_range(start="2021-07-25",
... end="2021-07-30"))
>>> ser.dt.isocalendar()
year week day
0 2021 29 7
1 2021 30 1
2 2021 30 2
3 2021 30 3
4 2021 30 4
5 2021 30 5
>>> ser.dt.isocalendar().week
0 29
1 30
2 30
3 30
4 30
5 30
Name: week, dtype: object
>>> serIndex = cudf.to_datetime(pd.Series(["2010-01-01", pd.NaT]))
>>> serIndex.dt.isocalendar()
year week day
0 2009 53 5
1 <NA> <NA> <NA>
>>> serIndex.dt.isocalendar().year
0 2009
1 <NA>
Name: year, dtype: object
"""
return cudf.core.tools.datetimes._to_iso_calendar(self)
@property # type: ignore
@_cudf_nvtx_annotate
def is_month_start(self):
"""
Booleans indicating if dates are the first day of the month.
"""
return (self.day == 1).fillna(False)
@property # type: ignore
@_cudf_nvtx_annotate
def days_in_month(self):
"""
Get the total number of days in the month that the date falls on.
Returns
-------
Series
Integers representing the number of days in month
Examples
--------
>>> import pandas as pd, cudf
>>> s = cudf.Series(
... pd.date_range(start='2000-08-01', end='2001-08-01', freq='1M'))
>>> s
0 2000-08-31
1 2000-09-30
2 2000-10-31
3 2000-11-30
4 2000-12-31
5 2001-01-31
6 2001-02-28
7 2001-03-31
8 2001-04-30
9 2001-05-31
10 2001-06-30
11 2001-07-31
dtype: datetime64[ns]
>>> s.dt.days_in_month
0 31
1 30
2 31
3 30
4 31
5 31
6 28
7 31
8 30
9 31
10 30
11 31
dtype: int16
"""
res = libcudf.datetime.days_in_month(self.series._column)
return Series._from_data(
ColumnAccessor({None: res}),
index=self.series._index,
name=self.series.name,
)
@property # type: ignore
@_cudf_nvtx_annotate
def is_month_end(self):
"""
Boolean indicator if the date is the last day of the month.
Returns
-------
Series
Booleans indicating if dates are the last day of the month.
Examples
--------
>>> import pandas as pd, cudf
>>> s = cudf.Series(
... pd.date_range(start='2000-08-26', end='2000-09-03', freq='1D'))
>>> s
0 2000-08-26
1 2000-08-27
2 2000-08-28
3 2000-08-29
4 2000-08-30
5 2000-08-31
6 2000-09-01
7 2000-09-02
8 2000-09-03
dtype: datetime64[ns]
>>> s.dt.is_month_end
0 False
1 False
2 False
3 False
4 False
5 True
6 False
7 False
8 False
dtype: bool
""" # noqa: E501
last_day = libcudf.datetime.last_day_of_month(self.series._column)
last_day = Series._from_data(
ColumnAccessor({None: last_day}),
index=self.series._index,
name=self.series.name,
)
return (self.day == last_day.dt.day).fillna(False)
@property # type: ignore
@_cudf_nvtx_annotate
def is_quarter_start(self):
"""
Boolean indicator if the date is the first day of a quarter.
Returns
-------
Series
Booleans indicating if dates are the beginning of a quarter
Examples
--------
>>> import pandas as pd, cudf
>>> s = cudf.Series(
... pd.date_range(start='2000-09-26', end='2000-10-03', freq='1D'))
>>> s
0 2000-09-26
1 2000-09-27
2 2000-09-28
3 2000-09-29
4 2000-09-30
5 2000-10-01
6 2000-10-02
7 2000-10-03
dtype: datetime64[ns]
>>> s.dt.is_quarter_start
0 False
1 False
2 False
3 False
4 False
5 True
6 False
7 False
dtype: bool
"""
day = self.series._column.get_dt_field("day")
first_month = self.series._column.get_dt_field("month").isin(
[1, 4, 7, 10]
)
result = ((day == cudf.Scalar(1)) & first_month).fillna(False)
return Series._from_data(
{None: result},
index=self.series._index,
name=self.series.name,
)
@property # type: ignore
@_cudf_nvtx_annotate
def is_quarter_end(self):
"""
Boolean indicator if the date is the last day of a quarter.
Returns
-------
Series
Booleans indicating if dates are the end of a quarter
Examples
--------
>>> import pandas as pd, cudf
>>> s = cudf.Series(
... pd.date_range(start='2000-09-26', end='2000-10-03', freq='1D'))
>>> s
0 2000-09-26
1 2000-09-27
2 2000-09-28
3 2000-09-29
4 2000-09-30
5 2000-10-01
6 2000-10-02
7 2000-10-03
dtype: datetime64[ns]
>>> s.dt.is_quarter_end
0 False
1 False
2 False
3 False
4 True
5 False
6 False
7 False
dtype: bool
"""
day = self.series._column.get_dt_field("day")
last_day = libcudf.datetime.last_day_of_month(self.series._column)
last_day = last_day.get_dt_field("day")
last_month = self.series._column.get_dt_field("month").isin(
[3, 6, 9, 12]
)
result = ((day == last_day) & last_month).fillna(False)
return Series._from_data(
{None: result},
index=self.series._index,
name=self.series.name,
)
@property # type: ignore
@_cudf_nvtx_annotate
def is_year_start(self):
"""
Boolean indicator if the date is the first day of the year.
Returns
-------
Series
Booleans indicating if dates are the first day of the year.
Examples
--------
>>> import pandas as pd, cudf
>>> dates = cudf.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_start
0 False
1 False
2 True
dtype: bool
"""
outcol = self.series._column.get_dt_field(
"day_of_year"
) == cudf.Scalar(1)
return Series._from_data(
{None: outcol.fillna(False)},
index=self.series._index,
name=self.series.name,
)
@property # type: ignore
@_cudf_nvtx_annotate
def is_year_end(self):
"""
Boolean indicator if the date is the last day of the year.
Returns
-------
Series
Booleans indicating if dates are the last day of the year.
Examples
--------
>>> import pandas as pd, cudf
>>> dates = cudf.Series(pd.date_range("2017-12-30", periods=3))
>>> dates
0 2017-12-30
1 2017-12-31
2 2018-01-01
dtype: datetime64[ns]
>>> dates.dt.is_year_end
0 False
1 True
2 False
dtype: bool
"""
day_of_year = self.series._column.get_dt_field("day_of_year")
leap_dates = libcudf.datetime.is_leap_year(self.series._column)
leap = day_of_year == cudf.Scalar(366)
non_leap = day_of_year == cudf.Scalar(365)
result = cudf._lib.copying.copy_if_else(leap, non_leap, leap_dates)
result = result.fillna(False)
return Series._from_data(
{None: result},
index=self.series._index,
name=self.series.name,
)
@_cudf_nvtx_annotate
def _get_dt_field(self, field):
out_column = self.series._column.get_dt_field(field)
return Series(
data=out_column, index=self.series._index, name=self.series.name
)
@_cudf_nvtx_annotate
def ceil(self, freq):
"""
Perform ceil operation on the data to the specified freq.
Parameters
----------
freq : str
One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"].
Must be a fixed frequency like 'S' (second) not 'ME' (month end).
See `frequency aliases <https://pandas.pydata.org/docs/\
user_guide/timeseries.html#timeseries-offset-aliases>`__
for more details on these aliases.
Returns
-------
Series
Series with all timestamps rounded up to the specified frequency.
The index is preserved.
Examples
--------
>>> import cudf
>>> t = cudf.Series(["2001-01-01 00:04:45", "2001-01-01 00:04:58",
... "2001-01-01 00:05:04"], dtype="datetime64[ns]")
>>> t.dt.ceil("T")
0 2001-01-01 00:05:00
1 2001-01-01 00:05:00
2 2001-01-01 00:06:00
dtype: datetime64[ns]
"""
out_column = self.series._column.ceil(freq)
return Series._from_data(
data={self.series.name: out_column}, index=self.series._index
)
@_cudf_nvtx_annotate
def floor(self, freq):
"""
Perform floor operation on the data to the specified freq.
Parameters
----------
freq : str
One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"].
Must be a fixed frequency like 'S' (second) not 'ME' (month end).
See `frequency aliases <https://pandas.pydata.org/docs/\
user_guide/timeseries.html#timeseries-offset-aliases>`__
for more details on these aliases.
Returns
-------
Series
Series with all timestamps rounded down to the specified frequency.
The index is preserved.
Examples
--------
>>> import cudf
>>> t = cudf.Series(["2001-01-01 00:04:45", "2001-01-01 00:04:58",
... "2001-01-01 00:05:04"], dtype="datetime64[ns]")
>>> t.dt.floor("T")
0 2001-01-01 00:04:00
1 2001-01-01 00:04:00
2 2001-01-01 00:05:00
dtype: datetime64[ns]
"""
out_column = self.series._column.floor(freq)
return Series._from_data(
data={self.series.name: out_column}, index=self.series._index
)
@_cudf_nvtx_annotate
def round(self, freq):
"""
Perform round operation on the data to the specified freq.
Parameters
----------
freq : str
One of ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"].
Must be a fixed frequency like 'S' (second) not 'ME' (month end).
See `frequency aliases <https://pandas.pydata.org/docs/\
user_guide/timeseries.html#timeseries-offset-aliases>`__
for more details on these aliases.
Returns
-------
Series
Series with all timestamps rounded to the specified frequency.
The index is preserved.
Examples
--------
>>> import cudf
>>> dt_sr = cudf.Series([
... "2001-01-01 00:04:45",
... "2001-01-01 00:04:58",
... "2001-01-01 00:05:04",
... ], dtype="datetime64[ns]")
>>> dt_sr.dt.round("T")
0 2001-01-01 00:05:00
1 2001-01-01 00:05:00
2 2001-01-01 00:05:00
dtype: datetime64[ns]
"""
out_column = self.series._column.round(freq)
return Series._from_data(
data={self.series.name: out_column}, index=self.series._index
)
@_cudf_nvtx_annotate
def strftime(self, date_format, *args, **kwargs):
"""
Convert to Series using specified ``date_format``.
Return a Series of formatted strings specified by ``date_format``,
which supports the same string format as the python standard library.
Details of the string format can be found in `python string format doc
<https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`_.
Parameters
----------
date_format : str
Date format string (e.g. "%Y-%m-%d").
Returns
-------
Series
Series of formatted strings.
Notes
-----
The following date format identifiers are not yet
supported: ``%c``, ``%x``, ``%X``
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> weekday_series = cudf.Series(pd.date_range("2000-01-01", periods=3,
... freq="q"))
>>> weekday_series
0 2000-03-31
1 2000-06-30
2 2000-09-30
dtype: datetime64[ns]
>>> weekday_series.dt.strftime("%Y-%m-%d")
0 2000-03-31
1 2000-06-30
2 2000-09-30
dtype: object
>>> weekday_series.dt.strftime("%Y %d %m")
0 2000 31 03
1 2000 30 06
2 2000 30 09
dtype: object
>>> weekday_series.dt.strftime("%Y / %d / %m")
0 2000 / 31 / 03
1 2000 / 30 / 06
2 2000 / 30 / 09
dtype: object
"""
if not isinstance(date_format, str):
raise TypeError(
f"'date_format' must be str, not {type(date_format)}"
)
# TODO: Remove following validations
# once https://github.com/rapidsai/cudf/issues/5991
# is implemented
not_implemented_formats = {
"%c",
"%x",
"%X",
}
for d_format in not_implemented_formats:
if d_format in date_format:
raise NotImplementedError(
f"{d_format} date-time format is not "
f"supported yet, Please follow this issue "
f"https://github.com/rapidsai/cudf/issues/5991 "
f"for tracking purposes."
)
str_col = self.series._column.as_string_column(
dtype="str", format=date_format
)
return Series(
data=str_col, index=self.series._index, name=self.series.name
)
@copy_docstring(DatetimeIndex.tz_localize)
def tz_localize(self, tz, ambiguous="NaT", nonexistent="NaT"):
from cudf.core._internals.timezones import delocalize, localize
if tz is None:
result_col = delocalize(self.series._column)
else:
result_col = localize(
self.series._column, tz, ambiguous, nonexistent
)
return Series._from_data(
data={self.series.name: result_col},
index=self.series._index,
)
@copy_docstring(DatetimeIndex.tz_convert)
def tz_convert(self, tz):
"""
Parameters
----------
tz : str
Time zone for time. Corresponding timestamps would be converted
to this time zone of the Datetime Array/Index.
A `tz` of None will convert to UTC and remove the
timezone information.
"""
from cudf.core._internals.timezones import convert
if tz is None:
result_col = self.series._column._utc_time
else:
result_col = convert(self.series._column, tz)
return Series._from_data(
{self.series.name: result_col}, index=self.series._index
)
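# Illustrative sketch (comments only, not executed) of how the timezone
# accessors above compose; exact offsets depend on the timezone database
# available at runtime, so no output is shown.
#
# >>> import pandas as pd, cudf
# >>> s = cudf.Series(pd.date_range("2001-01-01", periods=3, freq="H"))
# >>> s_utc = s.dt.tz_localize("UTC")            # attach a timezone
# >>> s_nyc = s_utc.dt.tz_convert("US/Eastern")  # convert to another zone
# >>> s_naive = s_nyc.dt.tz_localize(None)       # drop timezone information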
class TimedeltaProperties:
"""
Accessor object for timedelta-like properties of the Series values.
Returns
-------
Returns a Series indexed like the original Series.
Examples
--------
>>> import cudf
>>> seconds_series = cudf.Series([1, 2, 3], dtype='timedelta64[s]')
>>> seconds_series
0 00:00:01
1 00:00:02
2 00:00:03
dtype: timedelta64[s]
>>> seconds_series.dt.seconds
0 1
1 2
2 3
dtype: int64
>>> series = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,
... 3244334234], dtype='timedelta64[ms]')
>>> series
0 141 days 13:35:12.123
1 14 days 06:00:31.231
2 13000 days 10:12:48.712
3 0 days 00:35:35.656
4 37 days 13:12:14.234
dtype: timedelta64[ms]
>>> series.dt.components
days hours minutes seconds milliseconds microseconds nanoseconds
0 141 13 35 12 123 0 0
1 14 6 0 31 231 0 0
2 13000 10 12 48 712 0 0
3 0 0 35 35 656 0 0
4 37 13 12 14 234 0 0
>>> series.dt.days
0 141
1 14
2 13000
3 0
4 37
dtype: int64
>>> series.dt.seconds
0 48912
1 21631
2 36768
3 2135
4 47534
dtype: int64
>>> series.dt.microseconds
0 123000
1 231000
2 712000
3 656000
4 234000
dtype: int64
>>> series.dt.nanoseconds
0 0
1 0
2 0
3 0
4 0
dtype: int64
"""
def __init__(self, series):
self.series = series
@property # type: ignore
@_cudf_nvtx_annotate
def days(self):
"""
Number of days.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,
... 3244334234], dtype='timedelta64[ms]')
>>> s
0 141 days 13:35:12.123
1 14 days 06:00:31.231
2 13000 days 10:12:48.712
3 0 days 00:35:35.656
4 37 days 13:12:14.234
dtype: timedelta64[ms]
>>> s.dt.days
0 141
1 14
2 13000
3 0
4 37
dtype: int64
"""
return self._get_td_field("days")
@property # type: ignore
@_cudf_nvtx_annotate
def seconds(self):
"""
Number of seconds (>= 0 and less than 1 day).
Returns
-------
Series
Examples
--------
>>> import cudf
>>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,
... 3244334234], dtype='timedelta64[ms]')
>>> s
0 141 days 13:35:12.123
1 14 days 06:00:31.231
2 13000 days 10:12:48.712
3 0 days 00:35:35.656
4 37 days 13:12:14.234
dtype: timedelta64[ms]
>>> s.dt.seconds
0 48912
1 21631
2 36768
3 2135
4 47534
dtype: int64
>>> s.dt.microseconds
0 123000
1 231000
2 712000
3 656000
4 234000
dtype: int64
"""
return self._get_td_field("seconds")
@property # type: ignore
@_cudf_nvtx_annotate
def microseconds(self):
"""
Number of microseconds (>= 0 and less than 1 second).
Returns
-------
Series
Examples
--------
>>> import cudf
>>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,
... 3244334234], dtype='timedelta64[ms]')
>>> s
0 141 days 13:35:12.123
1 14 days 06:00:31.231
2 13000 days 10:12:48.712
3 0 days 00:35:35.656
4 37 days 13:12:14.234
dtype: timedelta64[ms]
>>> s.dt.microseconds
0 123000
1 231000
2 712000
3 656000
4 234000
dtype: int64
"""
return self._get_td_field("microseconds")
@property # type: ignore
@_cudf_nvtx_annotate
def nanoseconds(self):
"""
Return the number of nanoseconds (n), where 0 <= n < 1 microsecond.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656,
... 3244334234], dtype='timedelta64[ns]')
>>> s
0 00:00:12.231312123
1 00:00:01.231231231
2 00:18:43.236768712
3 00:00:00.002135656
4 00:00:03.244334234
dtype: timedelta64[ns]
>>> s.dt.nanoseconds
0 123
1 231
2 712
3 656
4 234
dtype: int64
"""
return self._get_td_field("nanoseconds")
@property # type: ignore
@_cudf_nvtx_annotate
def components(self):
"""
Return a Dataframe of the components of the Timedeltas.
Returns
-------
DataFrame
Examples
--------
>>> s = cudf.Series([12231312123, 1231231231, 1123236768712, 2135656, 3244334234], dtype='timedelta64[ms]')
>>> s
0 141 days 13:35:12.123
1 14 days 06:00:31.231
2 13000 days 10:12:48.712
3 0 days 00:35:35.656
4 37 days 13:12:14.234
dtype: timedelta64[ms]
>>> s.dt.components
days hours minutes seconds milliseconds microseconds nanoseconds
0 141 13 35 12 123 0 0
1 14 6 0 31 231 0 0
2 13000 10 12 48 712 0 0
3 0 0 35 35 656 0 0
4 37 13 12 14 234 0 0
""" # noqa: E501
return self.series._column.components(index=self.series._index)
@_cudf_nvtx_annotate
def _get_td_field(self, field):
out_column = getattr(self.series._column, field)
return Series(
data=out_column, index=self.series._index, name=self.series.name
)
@_cudf_nvtx_annotate
def _align_indices(series_list, how="outer", allow_non_unique=False):
"""
Internal util to align the indices of a list of Series objects
series_list : list of Series objects
how : {"outer", "inner"}
If "outer", the values of the resulting index are the
unique values of the index obtained by concatenating
the indices of all the series.
If "inner", the values of the resulting index are
the values common to the indices of all series.
allow_non_unique : bool
Whether or not to allow non-unique valued indices in the input
series.
"""
if len(series_list) <= 1:
return series_list
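# Rough sketch of the intended behaviour (illustration only): given s1
# indexed by [0, 1, 2] and s2 indexed by [1, 2, 3],
#   how="outer" -> both series reindexed to [0, 1, 2, 3], gaps become <NA>
#   how="inner" -> both series reindexed to [1, 2]
# The combined index is built below by joining empty DataFrames on the
# individual indices.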
# check if all indices are the same
head = series_list[0].index
all_index_equal = True
for sr in series_list[1:]:
if not sr.index.equals(head):
all_index_equal = False
break
# check if all names are the same
all_names_equal = True
for sr in series_list[1:]:
if not sr.index.names == head.names:
all_names_equal = False
new_index_names = [None] * head.nlevels
if all_names_equal:
new_index_names = head.names
if all_index_equal:
return series_list
combined_index = series_list[0].index
for sr in series_list[1:]:
combined_index = (
cudf.DataFrame(index=sr.index).join(
cudf.DataFrame(index=combined_index),
sort=True,
how=how,
)
).index
combined_index.names = new_index_names
# align all Series to the combined index
result = [
sr._align_to_index(
combined_index, how=how, allow_non_unique=allow_non_unique
)
for sr in series_list
]
return result
@acquire_spill_lock()
@_cudf_nvtx_annotate
def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
r"""Returns a boolean array where two arrays are equal within a tolerance.
Two values in ``a`` and ``b`` are considered equal when the following
equation is satisfied.
.. math::
|a - b| \le \mathrm{atol} + \mathrm{rtol} |b|
Parameters
----------
a : list-like, array-like or cudf.Series
Input sequence to compare.
b : list-like, array-like or cudf.Series
Input sequence to compare.
rtol : float
The relative tolerance.
atol : float
The absolute tolerance.
equal_nan : bool
If ``True``, nulls in ``a`` will be considered equal
to nulls in ``b``.
Returns
-------
Series
See Also
--------
np.isclose : Returns a boolean array where two arrays are element-wise
equal within a tolerance.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series([1.9876543, 2.9876654, 3.9876543, None, 9.9, 1.0])
>>> s2 = cudf.Series([1.987654321, 2.987654321, 3.987654321, None, 19.9,
... None])
>>> s1
0 1.9876543
1 2.9876654
2 3.9876543
3 <NA>
4 9.9
5 1.0
dtype: float64
>>> s2
0 1.987654321
1 2.987654321
2 3.987654321
3 <NA>
4 19.9
5 <NA>
dtype: float64
>>> cudf.isclose(s1, s2)
0 True
1 True
2 True
3 False
4 False
5 False
dtype: bool
>>> cudf.isclose(s1, s2, equal_nan=True)
0 True
1 True
2 True
3 True
4 False
5 False
dtype: bool
>>> cudf.isclose(s1, s2, equal_nan=False)
0 True
1 True
2 True
3 False
4 False
5 False
dtype: bool
"""
if not can_convert_to_column(a):
raise TypeError(
f"Parameter `a` is expected to be a "
f"list-like or Series object, found:{type(a)}"
)
if not can_convert_to_column(b):
raise TypeError(
f"Parameter `b` is expected to be a "
f"list-like or Series object, found:{type(a)}"
)
if isinstance(a, pd.Series):
a = Series.from_pandas(a)
if isinstance(b, pd.Series):
b = Series.from_pandas(b)
index = None
if isinstance(a, cudf.Series) and isinstance(b, cudf.Series):
b = b.reindex(a.index)
index = as_index(a.index)
a_col = column.as_column(a)
a_array = cupy.asarray(a_col.data_array_view(mode="read"))
b_col = column.as_column(b)
b_array = cupy.asarray(b_col.data_array_view(mode="read"))
result = cupy.isclose(
a=a_array, b=b_array, rtol=rtol, atol=atol, equal_nan=equal_nan
)
result_col = column.as_column(result)
if a_col.null_count and b_col.null_count:
a_nulls = a_col.isnull()
b_nulls = b_col.isnull()
null_values = a_nulls | b_nulls
if equal_nan is True:
equal_nulls = a_nulls & b_nulls
del a_nulls, b_nulls
elif a_col.null_count:
null_values = a_col.isnull()
elif b_col.null_count:
null_values = b_col.isnull()
else:
return Series(result_col, index=index)
result_col[null_values] = False
if equal_nan is True and a_col.null_count and b_col.null_count:
result_col[equal_nulls] = True
return Series(result_col, index=index)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/df_protocol.py
|
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
import enum
from collections import abc
from typing import (
Any,
Dict,
Iterable,
Mapping,
Optional,
Sequence,
Tuple,
cast,
)
import cupy as cp
import numpy as np
from numba.cuda import as_cuda_array
import rmm
import cudf
from cudf.core.buffer import Buffer, as_buffer
from cudf.core.column import as_column, build_categorical_column, build_column
# Implementation of interchange protocol classes
# ----------------------------------------------
class _DtypeKind(enum.IntEnum):
INT = 0
UINT = 1
FLOAT = 2
BOOL = 20
STRING = 21 # UTF-8
DATETIME = 22
CATEGORICAL = 23
class _Device(enum.IntEnum):
CPU = 1
CUDA = 2
CPU_PINNED = 3
OPENCL = 4
VULKAN = 7
METAL = 8
VPI = 9
ROCM = 10
class _MaskKind(enum.IntEnum):
NON_NULLABLE = 0
NAN = 1
SENTINEL = 2
BITMASK = 3
BYTEMASK = 4
_SUPPORTED_KINDS = {
_DtypeKind.INT,
_DtypeKind.UINT,
_DtypeKind.FLOAT,
_DtypeKind.CATEGORICAL,
_DtypeKind.BOOL,
_DtypeKind.STRING,
}
ProtoDtype = Tuple[_DtypeKind, int, str, str]
class _CuDFBuffer:
"""
Data in the buffer is guaranteed to be contiguous in memory.
"""
def __init__(
self,
buf: Buffer,
dtype: np.dtype,
allow_copy: bool = True,
) -> None:
"""
Use Buffer object.
"""
# Store the cudf buffer where the data resides as a private
# attribute, so we can use it to retrieve the public attributes
self._buf = buf
self._dtype = dtype
self._allow_copy = allow_copy
@property
def bufsize(self) -> int:
"""
The Buffer size in bytes.
"""
return self._buf.size
@property
def ptr(self) -> int:
"""
Pointer to start of the buffer as an integer.
"""
return self._buf.get_ptr(mode="write")
def __dlpack__(self):
# DLPack not implemented in NumPy yet, so leave it out here.
try:
cuda_array = as_cuda_array(self._buf).view(self._dtype)
return cp.asarray(cuda_array).toDlpack()
except ValueError:
raise TypeError(f"dtype {self._dtype} unsupported by `dlpack`")
def __dlpack_device__(self) -> Tuple[_Device, int]:
"""
_Device type and _Device ID for where the data in the buffer resides.
"""
return (_Device.CUDA, cp.asarray(self._buf).device.id)
def __repr__(self) -> str:
return f"{self.__class__.__name__}(" + str(
{
"bufsize": self.bufsize,
"ptr": self.ptr,
"device": self.__dlpack_device__()[0].name,
}
) + ")"
class _CuDFColumn:
"""
A column object, with only the methods and properties required by the
interchange protocol defined.
A column can contain one or more chunks. Each chunk can contain up to three
buffers - a data buffer, a mask buffer (depending on null representation),
and an offsets buffer (if variable-size binary; e.g., variable-length
strings).
Note: this Column object can only be produced by ``__dataframe__``, so
doesn't need its own version or ``__column__`` protocol.
"""
def __init__(
self,
column: cudf.core.column.ColumnBase,
nan_as_null: bool = True,
allow_copy: bool = True,
) -> None:
"""
Note: doesn't deal with extension arrays yet, just assume a regular
Series/ndarray for now.
"""
if not isinstance(column, cudf.core.column.ColumnBase):
raise TypeError(
"column must be a subtype of df.core.column.ColumnBase,"
f"got {type(column)}"
)
self._col = column
self._nan_as_null = nan_as_null
self._allow_copy = allow_copy
def size(self) -> int:
"""
Size of the column, in elements.
"""
return self._col.size
@property
def offset(self) -> int:
"""
Offset of first element. Always zero.
"""
return 0
@property
def dtype(self) -> ProtoDtype:
"""
Dtype description as a tuple
``(kind, bit-width, format string, endianness)``
Kind :
- INT = 0
- UINT = 1
- FLOAT = 2
- BOOL = 20
- STRING = 21 # UTF-8
- DATETIME = 22
- CATEGORICAL = 23
Bit-width : the number of bits as an integer
Format string : data type description format string in Apache Arrow C
Data Interface format.
Endianness : currently only native endianness (``=``) is supported
Notes
-----
- Kind specifiers are aligned with DLPack where possible
(hence the jump to 20, leave enough room for future extension)
- Masks must be specified as boolean with either bit width 1
(for bit masks) or 8 (for byte masks).
- Dtype width in bits was preferred over bytes
- Endianness isn't too useful, but included now in case
in the future we need to support non-native endianness
- Went with Apache Arrow format strings over NumPy format strings
because they're more complete from a dataframe perspective
- Format strings are mostly useful for datetime specification,
and for categoricals.
- For categoricals, the format string describes the type of the
categorical in the data buffer. In case of a separate encoding
of the categorical (e.g. an integer to string mapping),
this can be derived from ``self.describe_categorical``.
- Data types not included: complex, Arrow-style null,
binary, decimal, and nested (list, struct, map, union) dtypes.
"""
dtype = self._col.dtype
# For now, assume that, if the column dtype is 'O' (i.e., `object`),
# then we have an array of strings
if not isinstance(dtype, cudf.CategoricalDtype) and dtype.kind == "O":
return (_DtypeKind.STRING, 8, "u", "=")
return self._dtype_from_cudfdtype(dtype)
def _dtype_from_cudfdtype(self, dtype) -> ProtoDtype:
"""
See `self.dtype` for details.
"""
# Note: 'c' (complex) not handled yet (not in array spec v1).
# 'b', 'B' (bytes), 'S', 'a' (old-style string) and 'V' (void) are not
# handled. datetime and timedelta both map to datetime
# (is timedelta handled?)
_np_kinds = {
"i": _DtypeKind.INT,
"u": _DtypeKind.UINT,
"f": _DtypeKind.FLOAT,
"b": _DtypeKind.BOOL,
"U": _DtypeKind.STRING,
"M": _DtypeKind.DATETIME,
"m": _DtypeKind.DATETIME,
}
kind = _np_kinds.get(dtype.kind, None)
if kind is None:
# Not a NumPy/CuPy dtype. Check if it's a categorical maybe
if isinstance(dtype, cudf.CategoricalDtype):
kind = _DtypeKind.CATEGORICAL
# Codes and categories' dtypes are different.
# We use codes' dtype as these are stored in the buffer.
codes = cast(
cudf.core.column.CategoricalColumn, self._col
).codes
dtype = codes.dtype
else:
raise ValueError(
f"Data type {dtype} not supported by exchange protocol"
)
if kind not in _SUPPORTED_KINDS:
raise NotImplementedError(f"Data type {dtype} not handled yet")
bitwidth = dtype.itemsize * 8
format_str = dtype.str
endianness = dtype.byteorder if kind != _DtypeKind.CATEGORICAL else "="
return (kind, bitwidth, format_str, endianness)
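# Example of the tuple produced above for a plain int64 column (assuming
# NumPy's native little-endian format string; shown for illustration only):
#   (_DtypeKind.INT, 64, "<i8", "=")
# and for a categorical column backed by int8 codes:
#   (_DtypeKind.CATEGORICAL, 8, "|i1", "=")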
@property
def describe_categorical(self) -> Tuple[bool, bool, Dict[int, Any]]:
"""
If the dtype is categorical, there are two options:
- There are only values in the data buffer.
- There is a separate dictionary-style encoding for categorical values.
Raises TypeError if the dtype is not categorical
Content of returned dict:
- "is_ordered" : bool, whether the ordering of dictionary
indices is semantically meaningful.
- "is_dictionary" : bool, whether a dictionary-style mapping of
categorical values to other objects exists
- "mapping" : dict, Python-level only (e.g. ``{int: str}``).
None if not a dictionary-style categorical.
"""
if not self.dtype[0] == _DtypeKind.CATEGORICAL:
raise TypeError(
"`describe_categorical` only works on "
"a column with categorical dtype!"
)
categ_col = cast(cudf.core.column.CategoricalColumn, self._col)
ordered = bool(categ_col.dtype.ordered)
is_dictionary = True
# NOTE: this shows the children approach is better, transforming
# `categories` to a "mapping" dict is inefficient
categories = categ_col.categories
mapping = {ix: val for ix, val in enumerate(categories.values_host)}
return ordered, is_dictionary, mapping
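# For a column whose categories are ["a", "b"], the tuple returned above
# would look roughly like (hypothetical values, illustration only):
#   (False, True, {0: "a", 1: "b"})
# i.e. unordered, dictionary-encoded, with integer codes mapping to the
# category values.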
@property
def describe_null(self) -> Tuple[int, Any]:
"""
Return the missing value (or "null") representation the column dtype
uses, as a tuple ``(kind, value)``.
Kind:
- 0 : non-nullable
- 1 : NaN/NaT
- 2 : sentinel value
- 3 : bit mask
- 4 : byte mask
Value : if kind is "sentinel value", the actual value.
If kind is a bit mask or a byte mask, the value (0 or 1)
indicating a missing value.
None otherwise.
"""
kind = self.dtype[0]
if self.null_count == 0:
# there is no validity mask so it is non-nullable
return _MaskKind.NON_NULLABLE, None
elif kind in _SUPPORTED_KINDS:
# currently, we return a bit mask
return _MaskKind.BITMASK, 0
else:
raise NotImplementedError(
f"Data type {self.dtype} not yet supported"
)
@property
def null_count(self) -> int:
"""
Number of null elements. Should always be known.
"""
return self._col.null_count
@property
def metadata(self) -> Dict[str, Any]:
"""
Store specific metadata of the column.
"""
return {}
def num_chunks(self) -> int:
"""
Return the number of chunks the column consists of.
"""
return 1
def get_chunks(
self, n_chunks: Optional[int] = None
) -> Iterable["_CuDFColumn"]:
"""
Return an iterable yielding the chunks.
See `DataFrame.get_chunks` for details on ``n_chunks``.
"""
return (self,)
def get_buffers(
self,
) -> Mapping[str, Optional[Tuple[_CuDFBuffer, ProtoDtype]]]:
"""
Return a dictionary containing the underlying buffers.
The returned dictionary has the following contents:
- "data": a two-element tuple whose first element is a buffer
containing the data and whose second element is the data
buffer's associated dtype.
- "validity": a two-element tuple whose first element is a buffer
containing mask values indicating missing data and
whose second element is the mask value buffer's
associated dtype. None if the null representation is
not a bit or byte mask.
- "offsets": a two-element tuple whose first element is a buffer
containing the offset values for variable-size binary
data (e.g., variable-length strings) and whose second
element is the offsets buffer's associated dtype. None
if the data buffer does not have an associated offsets
buffer.
"""
buffers = {}
try:
buffers["validity"] = self._get_validity_buffer()
except RuntimeError:
buffers["validity"] = None
try:
buffers["offsets"] = self._get_offsets_buffer()
except RuntimeError:
buffers["offsets"] = None
buffers["data"] = self._get_data_buffer()
return buffers
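# Sketch of the mapping returned above for a nullable string column
# (buffer objects and data/offsets dtype tuples elided; illustration only):
#   {
#       "data":     (<buffer of UTF-8 code units>, <its dtype>),
#       "validity": (<bit-mask buffer>, (_DtypeKind.UINT, 8, "C", "=")),
#       "offsets":  (<integer offsets buffer>, <its dtype>),
#   }
# A non-nullable fixed-width column instead carries None for "validity"
# and "offsets".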
def _get_validity_buffer(
self,
) -> Optional[Tuple[_CuDFBuffer, ProtoDtype]]:
"""
Return the buffer containing the mask values
indicating missing data and the buffer's associated dtype.
Raises RuntimeError if null representation is not a bit or byte mask.
"""
null, invalid = self.describe_null
if null == _MaskKind.BITMASK:
assert self._col.mask is not None
buffer = _CuDFBuffer(
self._col.mask, cp.uint8, allow_copy=self._allow_copy
)
dtype = (_DtypeKind.UINT, 8, "C", "=")
return buffer, dtype
elif null == _MaskKind.NAN:
raise RuntimeError(
"This column uses NaN as null "
"so does not have a separate mask"
)
elif null == _MaskKind.NON_NULLABLE:
raise RuntimeError(
"This column is non-nullable so does not have a mask"
)
else:
raise NotImplementedError(
f"See {self.__class__.__name__}.describe_null method."
)
def _get_offsets_buffer(
self,
) -> Optional[Tuple[_CuDFBuffer, ProtoDtype]]:
"""
Return the buffer containing the offset values for
variable-size binary data (e.g., variable-length strings)
and the buffer's associated dtype.
Raises RuntimeError if the data buffer does not have an associated
offsets buffer.
"""
if self.dtype[0] == _DtypeKind.STRING:
offsets = self._col.children[0]
assert (offsets is not None) and (offsets.data is not None), (
"offsets(.data) should not be None for string column"
)
buffer = _CuDFBuffer(
offsets.data, offsets.dtype, allow_copy=self._allow_copy
)
dtype = self._dtype_from_cudfdtype(offsets.dtype)
else:
raise RuntimeError(
"This column has a fixed-length dtype "
"so does not have an offsets buffer"
)
return buffer, dtype
def _get_data_buffer(
self,
) -> Tuple[_CuDFBuffer, ProtoDtype]:
"""
Return the buffer containing the data and
the buffer's associated dtype.
"""
if self.dtype[0] in (
_DtypeKind.INT,
_DtypeKind.UINT,
_DtypeKind.FLOAT,
_DtypeKind.BOOL,
):
col_data = self._col
dtype = self.dtype
elif self.dtype[0] == _DtypeKind.CATEGORICAL:
col_data = cast(
cudf.core.column.CategoricalColumn, self._col
).codes
dtype = self._dtype_from_cudfdtype(col_data.dtype)
elif self.dtype[0] == _DtypeKind.STRING:
col_data = self._col.children[1]
dtype = self._dtype_from_cudfdtype(col_data.dtype)
else:
raise NotImplementedError(
f"Data type {self._col.dtype} not handled yet"
)
assert (col_data is not None) and (col_data.data is not None), (
f"col_data(.data) should not be None when dtype = {dtype}"
)
buffer = _CuDFBuffer(
col_data.data, col_data.dtype, allow_copy=self._allow_copy
)
return buffer, dtype
class _CuDFDataFrame:
"""
A data frame class, with only the methods required by the interchange
protocol defined.
Instances of this (private) class are returned from
``cudf.DataFrame.__dataframe__`` as objects with the methods and
attributes defined on this class.
"""
def __init__(
self,
df: "cudf.core.dataframe.DataFrame",
nan_as_null: bool = True,
allow_copy: bool = True,
) -> None:
"""
Constructor - an instance of this (private) class is returned from
`cudf.DataFrame.__dataframe__`.
"""
self._df = df
# ``nan_as_null`` is a keyword intended for the consumer to tell the
# producer to overwrite null values in the data with
# ``NaN`` (or ``NaT``).
# This currently has no effect; once support for nullable extension
# dtypes is added, this value should be propagated to columns.
self._nan_as_null = nan_as_null
self._allow_copy = allow_copy
def __dataframe__(
self, nan_as_null: bool = False, allow_copy: bool = True
) -> "_CuDFDataFrame":
"""
See the docstring of the `cudf.DataFrame.__dataframe__` for details
"""
return _CuDFDataFrame(
self._df, nan_as_null=nan_as_null, allow_copy=allow_copy
)
@property
def metadata(self):
# `index` isn't a regular column, and the protocol doesn't support row
# labels - so we export it as cuDF-specific metadata here.
return {"cudf.index": self._df.index}
def num_columns(self) -> int:
return len(self._df._column_names)
def num_rows(self) -> int:
return len(self._df)
def num_chunks(self) -> int:
return 1
def column_names(self) -> Iterable[str]:
return self._df._column_names
def get_column(self, i: int) -> _CuDFColumn:
return _CuDFColumn(
as_column(self._df.iloc[:, i]), allow_copy=self._allow_copy
)
def get_column_by_name(self, name: str) -> _CuDFColumn:
return _CuDFColumn(
as_column(self._df[name]), allow_copy=self._allow_copy
)
def get_columns(self) -> Iterable[_CuDFColumn]:
return [
_CuDFColumn(as_column(self._df[name]), allow_copy=self._allow_copy)
for name in self._df.columns
]
def select_columns(self, indices: Sequence[int]) -> "_CuDFDataFrame":
if not isinstance(indices, abc.Sequence):
raise ValueError("`indices` is not a sequence")
return _CuDFDataFrame(self._df.iloc[:, indices])
def select_columns_by_name(self, names: Sequence[str]) -> "_CuDFDataFrame":
if not isinstance(names, abc.Sequence):
raise ValueError("`names` is not a sequence")
return _CuDFDataFrame(
self._df.loc[:, names], self._nan_as_null, self._allow_copy
)
def get_chunks(
self, n_chunks: Optional[int] = None
) -> Iterable["_CuDFDataFrame"]:
"""
Return an iterator yielding the chunks.
"""
return (self,)
def __dataframe__(
self, nan_as_null: bool = False, allow_copy: bool = True
) -> _CuDFDataFrame:
"""
The public method to attach to cudf.DataFrame.
``nan_as_null`` is a keyword intended for the consumer to tell the
producer to overwrite null values in the data with ``NaN`` (or ``NaT``).
This currently has no effect; once support for nullable extension
dtypes is added, this value should be propagated to columns.
``allow_copy`` is a keyword that defines whether or not the library is
allowed to make a copy of the data. For example, copying data would be
necessary if a library supports strided buffers, given that this protocol
specifies contiguous buffers.
"""
return _CuDFDataFrame(self, nan_as_null=nan_as_null, allow_copy=allow_copy)
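# Hedged end-to-end sketch (not executed here): any object exposing
# ``__dataframe__`` can be handed to ``from_dataframe`` below. For example,
# moving a pandas frame onto the GPU requires an explicit copy:
#
# >>> import pandas as pd, cudf
# >>> pdf = pd.DataFrame({"a": [1, 2, 3]})
# >>> gdf = cudf.from_dataframe(pdf, allow_copy=True)  # CPU -> GPU copy
#
# With ``allow_copy=False`` the same call raises TypeError because the source
# buffers live in host memory (see ``_ensure_gpu_buffer``).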
"""
Implementation of the dataframe exchange protocol.
Public API
----------
from_dataframe : construct a cudf.DataFrame from an input data frame which
implements the exchange protocol
Notes
-----
- Interpreting a raw pointer (as in ``Buffer.ptr``) is annoying and
unsafe to do in pure Python. It's more general but definitely less friendly
than having ``to_arrow`` and ``to_numpy`` methods. So for the buffers which
lack ``__dlpack__`` (e.g., because the column dtype isn't supported by
DLPack), this is worth looking at again.
"""
# A typing protocol could be added later to let Mypy validate code using
# `from_dataframe` better.
DataFrameObject = Any
ColumnObject = Any
_INTS = {8: cp.int8, 16: cp.int16, 32: cp.int32, 64: cp.int64}
_UINTS = {8: cp.uint8, 16: cp.uint16, 32: cp.uint32, 64: cp.uint64}
_FLOATS = {32: cp.float32, 64: cp.float64}
_CP_DTYPES = {
0: _INTS,
1: _UINTS,
2: _FLOATS,
20: {8: bool},
21: {8: cp.uint8},
}
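# The nested mapping above is keyed first by _DtypeKind value and then by
# bit width; e.g. (kind=2, bitwidth=64) resolves to cp.float64 and
# (kind=20, bitwidth=8) to plain bool. See protocol_dtype_to_cupy_dtype below.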
def from_dataframe(
df: DataFrameObject, allow_copy: bool = False
) -> _CuDFDataFrame:
"""
Construct a ``DataFrame`` from ``df`` if it supports the
dataframe interchange protocol (``__dataframe__``).
Parameters
----------
df : DataFrameObject
Object supporting dataframe interchange protocol
allow_copy : bool
If ``True``, allow copying of the data. If ``False``, a
``TypeError`` is raised if data copying is required to
construct the ``DataFrame`` (e.g., if ``df`` lives in CPU
memory).
Returns
-------
DataFrame
Examples
--------
>>> import pandas as pd
>>> pdf = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
>>> df = cudf.from_dataframe(pdf, allow_copy=True)
>>> type(df)
cudf.core.dataframe.DataFrame
>>> df
a b
0 1 x
1 2 y
2 3 z
Notes
-----
See https://data-apis.org/dataframe-protocol/latest/index.html
for the dataframe interchange protocol spec and API
"""
if isinstance(df, cudf.DataFrame):
return df
if not hasattr(df, "__dataframe__"):
raise ValueError("`df` does not support __dataframe__")
df = df.__dataframe__(allow_copy=allow_copy)
# Check number of chunks, if there's more than one we need to iterate
if df.num_chunks() > 1:
raise NotImplementedError("More than one chunk not handled yet")
# We need a dict of columns here, with each column being a cudf column.
columns = dict()
_buffers = [] # hold on to buffers, keeps memory alive
for name in df.column_names():
col = df.get_column_by_name(name)
if col.dtype[0] in (
_DtypeKind.INT,
_DtypeKind.UINT,
_DtypeKind.FLOAT,
_DtypeKind.BOOL,
):
columns[name], _buf = _protocol_to_cudf_column_numeric(
col, allow_copy
)
elif col.dtype[0] == _DtypeKind.CATEGORICAL:
columns[name], _buf = _protocol_to_cudf_column_categorical(
col, allow_copy
)
elif col.dtype[0] == _DtypeKind.STRING:
columns[name], _buf = _protocol_to_cudf_column_string(
col, allow_copy
)
else:
raise NotImplementedError(
f"Data type {col.dtype[0]} not handled yet"
)
_buffers.append(_buf)
df_new = cudf.DataFrame._from_data(columns)
df_new._buffers = _buffers
return df_new
def _protocol_to_cudf_column_numeric(
col, allow_copy: bool
) -> Tuple[
cudf.core.column.ColumnBase,
Mapping[str, Optional[Tuple[_CuDFBuffer, ProtoDtype]]],
]:
"""
Convert an int, uint, float or bool protocol column
to the corresponding cudf column
"""
if col.offset != 0:
raise NotImplementedError("column.offset > 0 not handled yet")
buffers = col.get_buffers()
assert buffers["data"] is not None, "data buffer should not be None"
_dbuffer, _ddtype = buffers["data"]
_dbuffer = _ensure_gpu_buffer(_dbuffer, _ddtype, allow_copy)
cudfcol_num = build_column(
_dbuffer._buf,
protocol_dtype_to_cupy_dtype(_ddtype),
)
return _set_missing_values(col, cudfcol_num, allow_copy), buffers
def _ensure_gpu_buffer(buf, data_type, allow_copy: bool) -> _CuDFBuffer:
# if `buf` is a (protocol) buffer that lives on the GPU already,
# return it as is. Otherwise, copy it to the device and return
# the resulting buffer.
if buf.__dlpack_device__()[0] != _Device.CUDA:
if allow_copy:
dbuf = rmm.DeviceBuffer(ptr=buf.ptr, size=buf.bufsize)
return _CuDFBuffer(
as_buffer(dbuf, exposed=True),
protocol_dtype_to_cupy_dtype(data_type),
allow_copy,
)
else:
raise TypeError(
"This operation must copy data from CPU to GPU. "
"Set `allow_copy=True` to allow it."
)
return buf
def _set_missing_values(
protocol_col,
cudf_col: cudf.core.column.ColumnBase,
allow_copy: bool,
) -> cudf.core.column.ColumnBase:
valid_mask = protocol_col.get_buffers()["validity"]
if valid_mask is not None:
null, invalid = protocol_col.describe_null
if null == _MaskKind.BYTEMASK:
valid_mask = _ensure_gpu_buffer(
valid_mask[0], valid_mask[1], allow_copy
)
boolmask = as_column(valid_mask._buf, dtype="bool")
bitmask = cudf._lib.transform.bools_to_mask(boolmask)
return cudf_col.set_mask(bitmask)
elif null == _MaskKind.BITMASK:
valid_mask = _ensure_gpu_buffer(
valid_mask[0], valid_mask[1], allow_copy
)
bitmask = valid_mask._buf
return cudf_col.set_mask(bitmask)
return cudf_col
def protocol_dtype_to_cupy_dtype(_dtype: ProtoDtype) -> cp.dtype:
kind = _dtype[0]
bitwidth = _dtype[1]
if _dtype[0] not in _SUPPORTED_KINDS:
raise RuntimeError(f"Data type {_dtype[0]} not handled yet")
return _CP_DTYPES[kind][bitwidth]
def _protocol_to_cudf_column_categorical(
col, allow_copy: bool
) -> Tuple[
cudf.core.column.ColumnBase,
Mapping[str, Optional[Tuple[_CuDFBuffer, ProtoDtype]]],
]:
"""
Convert a categorical protocol column to the corresponding cudf column.
"""
ordered, is_dict, categories = col.describe_categorical
if not is_dict:
raise NotImplementedError(
"Non-dictionary categoricals not supported yet"
)
buffers = col.get_buffers()
assert buffers["data"] is not None, "data buffer should not be None"
codes_buffer, codes_dtype = buffers["data"]
codes_buffer = _ensure_gpu_buffer(codes_buffer, codes_dtype, allow_copy)
cdtype = protocol_dtype_to_cupy_dtype(codes_dtype)
codes = build_column(
codes_buffer._buf,
cdtype,
)
cudfcol = build_categorical_column(
categories=categories,
codes=codes,
mask=codes.base_mask,
size=codes.size,
ordered=ordered,
)
return _set_missing_values(col, cudfcol, allow_copy), buffers
def _protocol_to_cudf_column_string(
col, allow_copy: bool
) -> Tuple[
cudf.core.column.ColumnBase,
Mapping[str, Optional[Tuple[_CuDFBuffer, ProtoDtype]]],
]:
"""
Convert a string ColumnObject to cudf Column object.
"""
# Retrieve the data buffers
buffers = col.get_buffers()
# Retrieve the data buffer containing the UTF-8 code units
assert buffers["data"] is not None, "data buffer should never be None"
data_buffer, data_dtype = buffers["data"]
data_buffer = _ensure_gpu_buffer(data_buffer, data_dtype, allow_copy)
encoded_string = build_column(
data_buffer._buf,
protocol_dtype_to_cupy_dtype(data_dtype),
)
# Retrieve the offsets buffer containing the index offsets demarcating
# the beginning and end of each string
assert buffers["offsets"] is not None, "not possible for string column"
offset_buffer, offset_dtype = buffers["offsets"]
offset_buffer = _ensure_gpu_buffer(offset_buffer, offset_dtype, allow_copy)
offsets = build_column(
offset_buffer._buf,
protocol_dtype_to_cupy_dtype(offset_dtype),
)
offsets = offsets.astype("int32")
cudfcol_str = build_column(
None, dtype=cp.dtype("O"), children=(offsets, encoded_string)
)
return _set_missing_values(col, cudfcol_str, allow_copy), buffers
def _protocol_buffer_to_cudf_buffer(protocol_buffer):
return as_buffer(
rmm.DeviceBuffer(
ptr=protocol_buffer.ptr, size=protocol_buffer.bufsize
),
exposed=True,
)
| 0 |
rapidsai_public_repos/cudf/python/cudf/cudf
|
rapidsai_public_repos/cudf/python/cudf/cudf/core/dataframe.py
|
# Copyright (c) 2018-2023, NVIDIA CORPORATION.
from __future__ import annotations
import functools
import inspect
import itertools
import numbers
import os
import pickle
import re
import sys
import textwrap
import warnings
from collections import abc, defaultdict
from collections.abc import Iterator
from typing import (
Any,
Callable,
Dict,
List,
MutableMapping,
Optional,
Set,
Tuple,
Union,
)
import cupy
import numba
import numpy as np
import pandas as pd
import pyarrow as pa
from nvtx import annotate
from packaging.version import Version
from pandas._config import get_option
from pandas.core.dtypes.common import is_float, is_integer
from pandas.io.formats import console
from pandas.io.formats.printing import pprint_thing
from typing_extensions import Self, assert_never
import cudf
import cudf.core.common
from cudf import _lib as libcudf
from cudf._typing import ColumnLike, Dtype, NotImplementedType
from cudf.api.extensions import no_default
from cudf.api.types import (
_is_scalar_or_zero_d_array,
is_bool_dtype,
is_categorical_dtype,
is_datetime_dtype,
is_dict_like,
is_dtype_equal,
is_list_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
is_struct_dtype,
)
from cudf.core import column, df_protocol, indexing_utils, reshape
from cudf.core.abc import Serializable
from cudf.core.column import (
CategoricalColumn,
ColumnBase,
as_column,
build_categorical_column,
build_column,
column_empty,
concat_columns,
)
from cudf.core.column_accessor import ColumnAccessor
from cudf.core.copy_types import BooleanMask
from cudf.core.groupby.groupby import DataFrameGroupBy, groupby_doc_template
from cudf.core.index import BaseIndex, RangeIndex, _index_from_data, as_index
from cudf.core.indexed_frame import (
IndexedFrame,
_FrameIndexer,
_get_label_range_or_mask,
_indices_from_labels,
doc_reset_index_template,
)
from cudf.core.join import Merge, MergeSemi
from cudf.core.missing import NA
from cudf.core.multiindex import MultiIndex
from cudf.core.resample import DataFrameResampler
from cudf.core.series import Series
from cudf.core.udf.row_function import _get_row_kernel
from cudf.utils import applyutils, docutils, ioutils, queryutils
from cudf.utils.docutils import copy_docstring
from cudf.utils.dtypes import (
can_convert_to_column,
cudf_dtype_from_pydata_dtype,
find_common_type,
is_column_like,
min_scalar_type,
numeric_normalize_types,
)
from cudf.utils.nvtx_annotation import _cudf_nvtx_annotate
from cudf.utils.utils import GetAttrGetItemMixin, _external_only_api
_cupy_nan_methods_map = {
"min": "nanmin",
"max": "nanmax",
"sum": "nansum",
"prod": "nanprod",
"product": "nanprod",
"mean": "nanmean",
"std": "nanstd",
"var": "nanvar",
}
_numeric_reduction_ops = (
"mean",
"min",
"max",
"sum",
"product",
"prod",
"std",
"var",
"kurtosis",
"kurt",
"skew",
)
def _shape_mismatch_error(x, y):
raise ValueError(
f"shape mismatch: value array of shape {x} "
f"could not be broadcast to indexing result of "
f"shape {y}"
)
class _DataFrameIndexer(_FrameIndexer):
def __getitem__(self, arg):
if (
isinstance(self._frame.index, MultiIndex)
or self._frame._data.multiindex
):
# This try/except block allows pandas-like tuple arguments
# to be used with MultiIndex dataframes.
try:
return self._getitem_tuple_arg(arg)
except (TypeError, KeyError, IndexError, ValueError):
return self._getitem_tuple_arg((arg, slice(None)))
else:
if not isinstance(arg, tuple):
arg = (arg, slice(None))
return self._getitem_tuple_arg(arg)
def __setitem__(self, key, value):
if not isinstance(key, tuple):
key = (key, slice(None))
return self._setitem_tuple_arg(key, value)
@_cudf_nvtx_annotate
def _can_downcast_to_series(self, df, arg):
"""
This method encapsulates the logic used
to determine whether or not the result of a loc/iloc
operation should be "downcasted" from a DataFrame to a
Series
"""
if isinstance(df, cudf.Series):
return False
nrows, ncols = df.shape
if nrows == 1:
if type(arg[0]) is slice:
if not is_scalar(arg[1]):
return False
elif (is_list_like(arg[0]) or is_column_like(arg[0])) and (
is_list_like(arg[1])
or is_column_like(arg[1])
or type(arg[1]) is slice
):
return False
else:
if is_bool_dtype(as_column(arg[0]).dtype) and not isinstance(
arg[1], slice
):
return True
dtypes = df.dtypes.values.tolist()
all_numeric = all(is_numeric_dtype(t) for t in dtypes)
if all_numeric or (
len(dtypes) and all(t == dtypes[0] for t in dtypes)
):
return True
if isinstance(arg[1], tuple):
return True
if ncols == 1:
if type(arg[1]) is slice:
return False
if isinstance(arg[1], tuple):
return len(arg[1]) == df._data.nlevels
if not (is_list_like(arg[1]) or is_column_like(arg[1])):
return True
return False
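# Hedged examples of the rule above (indicative only, mirroring pandas):
#   df.loc[0, :]       -> Series, when the selected row's dtypes are compatible
#   df.loc[:, "a"]     -> Series, a single column was selected
#   df.loc[0:0, ["a"]] -> remains a 1x1 DataFrame, both keys are list/slice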
@_cudf_nvtx_annotate
def _downcast_to_series(self, df, arg):
"""
"Downcast" from a DataFrame to a Series
based on Pandas indexing rules
"""
nrows, ncols = df.shape
# determine the axis along which the Series is taken:
if nrows == 1 and ncols == 1:
if is_scalar(arg[0]) and (
is_scalar(arg[1])
or (df._data.multiindex and arg[1] in df._column_names)
):
return df[df._column_names[0]].iloc[0]
elif not is_scalar(arg[0]):
axis = 1
else:
axis = 0
elif nrows == 1:
axis = 0
elif ncols == 1:
axis = 1
else:
raise ValueError("Cannot downcast DataFrame selection to Series")
# take series along the axis:
if axis == 1:
return df[df._data.names[0]]
else:
if df._num_columns > 0:
dtypes = df.dtypes.values.tolist()
normalized_dtype = np.result_type(*dtypes)
for name, col in df._data.items():
df[name] = col.astype(normalized_dtype)
sr = df.T
return sr[sr._data.names[0]]
class _DataFrameLocIndexer(_DataFrameIndexer):
"""
For selection by label.
"""
@_cudf_nvtx_annotate
def _getitem_scalar(self, arg):
return self._frame[arg[1]].loc[arg[0]]
@_cudf_nvtx_annotate
def _getitem_tuple_arg(self, arg):
from uuid import uuid4
# Step 1: Gather columns
if isinstance(arg, tuple):
columns_df = self._frame._get_columns_by_label(arg[1])
columns_df._index = self._frame._index
else:
columns_df = self._frame
# Step 2: Gather rows
if isinstance(columns_df.index, MultiIndex):
if isinstance(arg, (MultiIndex, pd.MultiIndex)):
if isinstance(arg, pd.MultiIndex):
arg = MultiIndex.from_pandas(arg)
indices = _indices_from_labels(columns_df, arg)
return columns_df.take(indices)
else:
if isinstance(arg, tuple):
row_arg = arg[0]
elif is_scalar(arg):
row_arg = (arg,)
else:
row_arg = arg
result = columns_df.index._get_row_major(columns_df, row_arg)
if (
len(result) == 1
and isinstance(arg, tuple)
and len(arg) > 1
and is_scalar(arg[1])
):
return result._data.columns[0].element_indexing(0)
return result
else:
if isinstance(arg[0], slice):
out = _get_label_range_or_mask(
columns_df.index, arg[0].start, arg[0].stop, arg[0].step
)
if isinstance(out, slice):
df = columns_df._slice(out)
else:
df = columns_df._apply_boolean_mask(
BooleanMask.from_column_unchecked(
cudf.core.column.as_column(out)
)
)
else:
tmp_arg = arg
if is_scalar(arg[0]):
# If a scalar, there is a possibility of duplicates and the join
# would return all of them, so convert it to an array-like
# before joining.
if cudf.get_option("mode.pandas_compatible"):
if any(
c.dtype != columns_df._columns[0].dtype
for c in columns_df._columns
):
raise TypeError(
"All columns need to be of same type, please "
"typecast to common dtype."
)
tmp_arg = ([tmp_arg[0]], tmp_arg[1])
if len(tmp_arg[0]) == 0:
return columns_df._empty_like(keep_index=True)
tmp_arg = (
as_column(
tmp_arg[0],
dtype=self._frame.index.dtype
if is_categorical_dtype(self._frame.index.dtype)
else None,
),
tmp_arg[1],
)
if is_bool_dtype(tmp_arg[0]):
df = columns_df._apply_boolean_mask(
BooleanMask(tmp_arg[0], len(columns_df))
)
else:
tmp_col_name = str(uuid4())
cantor_name = "_" + "_".join(
map(str, columns_df._data.names)
)
if columns_df._data.multiindex:
# column names must be appropriate length tuples
extra = tuple(
"" for _ in range(columns_df._data.nlevels - 1)
)
tmp_col_name = (tmp_col_name, *extra)
cantor_name = (cantor_name, *extra)
other_df = DataFrame(
{tmp_col_name: column.arange(len(tmp_arg[0]))},
index=as_index(tmp_arg[0]),
)
columns_df[cantor_name] = column.arange(len(columns_df))
df = other_df.join(columns_df, how="inner")
# the join does not assign any names to the index,
# so update it here
df.index.name = columns_df.index.name
df = df.sort_values(by=[tmp_col_name, cantor_name])
df.drop(columns=[tmp_col_name, cantor_name], inplace=True)
# There were no indices found
if len(df) == 0:
raise KeyError(arg)
# Step 3: Downcast
if self._can_downcast_to_series(df, arg):
return self._downcast_to_series(df, arg)
return df
@_cudf_nvtx_annotate
def _setitem_tuple_arg(self, key, value):
if (
isinstance(self._frame.index, MultiIndex)
or self._frame._data.multiindex
):
raise NotImplementedError(
"Setting values using df.loc[] not supported on "
"DataFrames with a MultiIndex"
)
try:
columns_df = self._frame._get_columns_by_label(key[1])
except KeyError:
if not self._frame.empty and isinstance(key[0], slice):
pos_range = _get_label_range_or_mask(
self._frame.index, key[0].start, key[0].stop, key[0].step
)
idx = self._frame.index[pos_range]
elif self._frame.empty and isinstance(key[0], slice):
idx = None
else:
if is_scalar(key[0]):
arr = [key[0]]
else:
arr = key[0]
idx = cudf.Index(arr)
if is_scalar(value):
length = len(idx) if idx is not None else 1
value = as_column(value, length=length)
new_col = cudf.Series(value, index=idx)
if not self._frame.empty:
new_col = new_col._align_to_index(
self._frame.index, how="right"
)
if self._frame.empty:
self._frame.index = (
idx if idx is not None else cudf.RangeIndex(len(new_col))
)
self._frame._data.insert(key[1], new_col)
else:
if is_scalar(value):
for col in columns_df._column_names:
self._frame[col].loc[key[0]] = value
elif isinstance(value, cudf.DataFrame):
if value.shape != self._frame.loc[key[0]].shape:
_shape_mismatch_error(
value.shape,
self._frame.loc[key[0]].shape,
)
value_column_names = set(value._column_names)
scatter_map = _indices_from_labels(self._frame, key[0])
for col in columns_df._column_names:
columns_df[col][scatter_map] = (
value._data[col] if col in value_column_names else NA
)
else:
value = cupy.asarray(value)
if cupy.ndim(value) == 2:
# If the inner dimension is 1, it's broadcastable to
# all columns of the dataframe.
indexed_shape = columns_df.loc[key[0]].shape
if value.shape[1] == 1:
if value.shape[0] != indexed_shape[0]:
_shape_mismatch_error(value.shape, indexed_shape)
for i, col in enumerate(columns_df._column_names):
self._frame[col].loc[key[0]] = value[:, 0]
else:
if value.shape != indexed_shape:
_shape_mismatch_error(value.shape, indexed_shape)
for i, col in enumerate(columns_df._column_names):
self._frame[col].loc[key[0]] = value[:, i]
else:
# handle cases where value is 1d object:
# If the key on column axis is a scalar, we indexed
# a single column; The 1d value should assign along
# the columns.
if is_scalar(key[1]):
for col in columns_df._column_names:
self._frame[col].loc[key[0]] = value
# Otherwise, there are two situations. The key on row axis
# can be a scalar or 1d. In either of the situation, the
# ith element in value corresponds to the ith row in
# the indexed object.
# If the key is 1d, a broadcast will happen.
else:
for i, col in enumerate(columns_df._column_names):
self._frame[col].loc[key[0]] = value[i]
class _DataFrameIlocIndexer(_DataFrameIndexer):
"""
For selection by index.
"""
_frame: DataFrame
def __getitem__(self, arg):
row_key, (
col_is_scalar,
column_names,
) = indexing_utils.destructure_dataframe_iloc_indexer(arg, self._frame)
row_spec = indexing_utils.parse_row_iloc_indexer(
row_key, len(self._frame)
)
ca = self._frame._data
index = self._frame.index
if col_is_scalar:
s = Series._from_data(
ca._select_by_names(column_names), index=index
)
return s._getitem_preprocessed(row_spec)
if column_names != list(self._frame._column_names):
frame = self._frame._from_data(
ca._select_by_names(column_names), index=index
)
else:
frame = self._frame
if isinstance(row_spec, indexing_utils.MapIndexer):
return frame._gather(row_spec.key, keep_index=True)
elif isinstance(row_spec, indexing_utils.MaskIndexer):
return frame._apply_boolean_mask(row_spec.key, keep_index=True)
elif isinstance(row_spec, indexing_utils.SliceIndexer):
return frame._slice(row_spec.key)
elif isinstance(row_spec, indexing_utils.ScalarIndexer):
result = frame._gather(row_spec.key, keep_index=True)
# Attempt to turn into series.
try:
# Behaviour difference from pandas, which will merrily
# turn any heterogeneous set of columns into a series if
# you only ask for one row.
new_name = result.index[0]
result = Series._concat(
[result[name] for name in column_names],
index=result.keys(),
)
result.name = new_name
return result
except TypeError:
# Couldn't find a common type, Hence:
# Raise in pandas compatibility mode,
# or just return a 1xN dataframe otherwise
if cudf.get_option("mode.pandas_compatible"):
raise TypeError(
"All columns need to be of same type, please "
"typecast to common dtype."
)
return result
elif isinstance(row_spec, indexing_utils.EmptyIndexer):
return frame._empty_like(keep_index=True)
assert_never(row_spec)
@_cudf_nvtx_annotate
def _setitem_tuple_arg(self, key, value):
columns_df = self._frame._from_data(
self._frame._data.select_by_index(key[1]), self._frame._index
)
if is_scalar(value):
for col in columns_df._column_names:
self._frame[col].iloc[key[0]] = value
elif isinstance(value, cudf.DataFrame):
if value.shape != self._frame.iloc[key[0]].shape:
_shape_mismatch_error(
value.shape,
self._frame.iloc[key[0]].shape,
)
value_column_names = set(value._column_names)
for col in columns_df._column_names:
columns_df[col][key[0]] = (
value._data[col] if col in value_column_names else NA
)
else:
# TODO: consolidate code path with identical counterpart
# in `_DataFrameLocIndexer._setitem_tuple_arg`
value = cupy.asarray(value)
if cupy.ndim(value) == 2:
indexed_shape = columns_df.iloc[key[0]].shape
if value.shape[1] == 1:
if value.shape[0] != indexed_shape[0]:
_shape_mismatch_error(value.shape, indexed_shape)
for i, col in enumerate(columns_df._column_names):
self._frame[col].iloc[key[0]] = value[:, 0]
else:
if value.shape != indexed_shape:
_shape_mismatch_error(value.shape, indexed_shape)
for i, col in enumerate(columns_df._column_names):
self._frame._data[col][key[0]] = value[:, i]
else:
if is_scalar(key[1]):
for col in columns_df._column_names:
self._frame[col].iloc[key[0]] = value
else:
for i, col in enumerate(columns_df._column_names):
self._frame[col].iloc[key[0]] = value[i]
class DataFrame(IndexedFrame, Serializable, GetAttrGetItemMixin):
"""
A GPU Dataframe object.
Parameters
----------
data : array-like, Iterable, dict, or DataFrame.
Dict can contain Series, arrays, constants, or list-like objects.
index : Index or array-like
Index to use for resulting frame. Will default to
RangeIndex if no indexing information part of input data and
no index provided.
columns : Index or array-like
Column labels to use for resulting frame.
Will default to RangeIndex (0, 1, 2, …, n) if no column
labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed.
If None, infer.
nan_as_null : bool, Default True
If ``None``/``True``, converts ``np.nan`` values to
``null`` values.
If ``False``, leaves ``np.nan`` values as is.
Examples
--------
Build dataframe with ``__setitem__``:
>>> import cudf
>>> df = cudf.DataFrame()
>>> df['key'] = [0, 1, 2, 3, 4]
>>> df['val'] = [float(i + 10) for i in range(5)] # insert column
>>> df
key val
0 0 10.0
1 1 11.0
2 2 12.0
3 3 13.0
4 4 14.0
Build DataFrame via dict of columns:
>>> import numpy as np
>>> from datetime import datetime, timedelta
>>> t0 = datetime.strptime('2018-10-07 12:00:00', '%Y-%m-%d %H:%M:%S')
>>> n = 5
>>> df = cudf.DataFrame({
... 'id': np.arange(n),
... 'datetimes': np.array(
... [(t0+ timedelta(seconds=x)) for x in range(n)])
... })
>>> df
id datetimes
0 0 2018-10-07 12:00:00
1 1 2018-10-07 12:00:01
2 2 2018-10-07 12:00:02
3 3 2018-10-07 12:00:03
4 4 2018-10-07 12:00:04
Build DataFrame via list of rows as tuples:
>>> df = cudf.DataFrame([
... (5, "cats", "jump", np.nan),
... (2, "dogs", "dig", 7.5),
... (3, "cows", "moo", -2.1, "occasionally"),
... ])
>>> df
0 1 2 3 4
0 5 cats jump <NA> <NA>
1 2 dogs dig 7.5 <NA>
2 3 cows moo -2.1 occasionally
Convert from a Pandas DataFrame:
>>> import pandas as pd
>>> pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
>>> pdf
a b
0 0 0.1
1 1 0.2
2 2 NaN
3 3 0.3
>>> df = cudf.from_pandas(pdf)
>>> df
a b
0 0 0.1
1 1 0.2
2 2 <NA>
3 3 0.3
"""
_PROTECTED_KEYS = frozenset(("_data", "_index"))
_accessors: Set[Any] = set()
_loc_indexer_type = _DataFrameLocIndexer
_iloc_indexer_type = _DataFrameIlocIndexer
_groupby = DataFrameGroupBy
_resampler = DataFrameResampler
@_cudf_nvtx_annotate
def __init__(
self, data=None, index=None, columns=None, dtype=None, nan_as_null=True
):
super().__init__()
if isinstance(columns, (Series, cudf.BaseIndex)):
columns = columns.to_pandas()
if isinstance(data, (DataFrame, pd.DataFrame)):
if isinstance(data, pd.DataFrame):
data = self.from_pandas(data, nan_as_null=nan_as_null)
if index is not None:
if not data.index.equals(index):
data = data.reindex(index)
index = data._index
else:
index = as_index(index)
else:
index = data._index
self._index = index
if columns is not None:
self._data = data._data
self._reindex(
column_names=columns, index=index, deep=False, inplace=True
)
if isinstance(
columns, (range, pd.RangeIndex, cudf.RangeIndex)
):
self._data.rangeindex = True
else:
self._data = data._data
self._data.rangeindex = True
elif isinstance(data, (cudf.Series, pd.Series)):
if isinstance(data, pd.Series):
data = cudf.Series.from_pandas(data, nan_as_null=nan_as_null)
# Series.name is not None and Series.name in columns
# -> align
# Series.name is not None and Series.name not in columns
# -> return empty DataFrame
# Series.name is None and no columns
# -> return 1 column DataFrame
# Series.name is None and columns
# -> return 1 column DataFrame if len(columns) in {0, 1}
if data.name is None and columns is not None:
if len(columns) > 1:
raise ValueError(
"Length of columns must be less than 2 if "
f"{type(data).__name__}.name is None."
)
name = columns[0]
else:
name = data.name or 0
self._init_from_dict_like(
{name: data},
index=index,
columns=columns,
nan_as_null=nan_as_null,
)
elif data is None:
if index is None:
self._index = RangeIndex(0)
else:
self._index = as_index(index)
if columns is not None:
rangeindex = isinstance(
columns, (range, pd.RangeIndex, cudf.RangeIndex)
)
label_dtype = getattr(columns, "dtype", None)
self._data = ColumnAccessor(
{
k: column.column_empty(
len(self), dtype="object", masked=True
)
for k in columns
},
level_names=tuple(columns.names)
if isinstance(columns, pd.Index)
else None,
rangeindex=rangeindex,
label_dtype=label_dtype,
)
elif isinstance(data, ColumnAccessor):
raise TypeError(
"Use cudf.Series._from_data for constructing a Series from "
"ColumnAccessor"
)
elif hasattr(data, "__cuda_array_interface__"):
arr_interface = data.__cuda_array_interface__
# descr is an optional field of the __cuda_array_interface__
if "descr" in arr_interface:
if len(arr_interface["descr"]) == 1:
new_df = self._from_arrays(
data, index=index, columns=columns
)
else:
new_df = self.from_records(
data, index=index, columns=columns
)
else:
new_df = self._from_arrays(data, index=index, columns=columns)
self._data = new_df._data
self._index = new_df._index
self._check_data_index_length_match()
elif hasattr(data, "__array_interface__"):
arr_interface = data.__array_interface__
if len(arr_interface["descr"]) == 1:
# not record arrays
new_df = self._from_arrays(data, index=index, columns=columns)
else:
new_df = self.from_records(data, index=index, columns=columns)
self._data = new_df._data
self._index = new_df._index
self._check_data_index_length_match()
else:
if isinstance(data, Iterator):
data = list(data)
if is_list_like(data):
if len(data) > 0 and is_scalar(data[0]):
if columns is not None:
data = dict(zip(columns, [data]))
rangeindex = isinstance(
columns, (range, pd.RangeIndex, cudf.RangeIndex)
)
else:
data = dict(enumerate([data]))
rangeindex = True
new_df = DataFrame(data=data, index=index)
self._data = new_df._data
self._index = new_df._index
self._data._level_names = (
tuple(columns.names)
if isinstance(columns, pd.Index)
else self._data._level_names
)
self._data.rangeindex = rangeindex
elif len(data) > 0 and isinstance(data[0], Series):
self._init_from_series_list(
data=data, columns=columns, index=index
)
else:
self._init_from_list_like(
data, index=index, columns=columns
)
self._check_data_index_length_match()
else:
if not is_dict_like(data):
raise TypeError("data must be list or dict-like")
self._init_from_dict_like(
data, index=index, columns=columns, nan_as_null=nan_as_null
)
self._check_data_index_length_match()
if dtype:
self._data = self.astype(dtype)._data
self._data.multiindex = self._data.multiindex or isinstance(
columns, pd.MultiIndex
)
@_cudf_nvtx_annotate
def _init_from_series_list(self, data, columns, index):
if index is None:
# When `index` is `None`, the final index of the
# resulting dataframe will be the union of
# all the Series' names.
final_index = as_index(_get_union_of_series_names(data))
else:
# When an `index` is passed, the final index of the
# resulting dataframe will be that index, but the
# shapes need validating first - explained below
data_length = len(data)
index_length = len(index)
if data_length != index_length:
# If the passed `index` length doesn't match
# length of Series objects in `data`, we must
# check if `data` can be duplicated/expanded
# to match the length of index. For that we
# check if the length of index is a factor
# of length of data.
#
# 1. If yes, we extend data
# until length of data is equal to length of index.
# 2. If no, we throw an error stating the
# shape of resulting `data` and `index`
# Simple example
# >>> import pandas as pd
# >>> s = pd.Series([1, 2, 3])
# >>> pd.DataFrame([s], index=['a', 'b'])
# 0 1 2
# a 1 2 3
# b 1 2 3
# >>> pd.DataFrame([s], index=['a', 'b', 'c'])
# 0 1 2
# a 1 2 3
# b 1 2 3
# c 1 2 3
if index_length % data_length == 0:
initial_data = data
data = []
for _ in range(int(index_length / data_length)):
data.extend([o for o in initial_data])
else:
raise ValueError(
f"Length of values ({data_length}) does "
f"not match length of index ({index_length})"
)
final_index = as_index(index)
series_lengths = list(map(len, data))
data = numeric_normalize_types(*data)
if series_lengths.count(series_lengths[0]) == len(series_lengths):
# Calculating the final dataframe columns by
# getting union of all `index` of the Series objects.
final_columns = _get_union_of_indices([d.index for d in data])
if isinstance(final_columns, cudf.RangeIndex):
self._data.rangeindex = True
for idx, series in enumerate(data):
if not series.index.is_unique:
raise ValueError(
"Reindexing only valid with uniquely valued Index "
"objects"
)
if not series.index.equals(final_columns):
series = series.reindex(final_columns)
self._data[idx] = column.as_column(series._column)
# Setting `final_columns` to self._index so
# that the resulting `transpose` will have
# columns set to `final_columns`
self._index = as_index(final_columns)
transpose = self.T
else:
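# The input Series have differing lengths: concatenate them
# along the column axis (aligning on each Series' index) and
# transpose so that each input Series becomes a row.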
concat_df = cudf.concat(data, axis=1)
cols = concat_df._data.to_pandas_index()
if cols.dtype == "object":
concat_df.columns = cols.astype("str")
transpose = concat_df.T
transpose._index = final_index
self._data = transpose._data
self._index = transpose._index
# If `columns` is passed, the resulting dataframe
# contains only the specified `columns`,
# in the same order.
if columns is not None:
for col_name in columns:
if col_name not in self._data:
self._data[col_name] = column.column_empty(
row_count=len(self), dtype=None, masked=True
)
self._data._level_names = (
tuple(columns.names)
if isinstance(columns, pd.Index)
else self._data._level_names
)
self._data = self._data.select_by_label(columns)
self._data.rangeindex = isinstance(
columns, (range, cudf.RangeIndex, pd.RangeIndex)
)
else:
self._data.rangeindex = True
@_cudf_nvtx_annotate
def _init_from_list_like(self, data, index=None, columns=None):
if index is None:
index = RangeIndex(start=0, stop=len(data))
else:
index = as_index(index)
self._index = as_index(index)
# list-of-dicts case
if len(data) > 0 and isinstance(data[0], dict):
data = DataFrame.from_pandas(pd.DataFrame(data))
self._data = data._data
# interval in a list
elif len(data) > 0 and isinstance(data[0], pd.Interval):
data = DataFrame.from_pandas(pd.DataFrame(data))
self._data = data._data
elif any(
not isinstance(col, (abc.Iterable, abc.Sequence)) for col in data
):
raise TypeError("Inputs should be an iterable or sequence.")
elif len(data) > 0 and not can_convert_to_column(data[0]):
raise ValueError("Must pass 2-d input.")
else:
if (
len(data) > 0
and columns is None
and isinstance(data[0], tuple)
and hasattr(data[0], "_fields")
):
# pandas behavior is to use the fields from the first
# namedtuple as the column names
columns = data[0]._fields
data = list(itertools.zip_longest(*data))
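# zip_longest transposes the list of rows into a list of
# columns, padding the shorter rows with None.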
if columns is not None and len(data) == 0:
data = [
cudf.core.column.column_empty(row_count=0, dtype=None)
for _ in columns
]
for col_name, col in enumerate(data):
self._data[col_name] = column.as_column(col)
self._data.rangeindex = True
if columns is not None:
if len(columns) != len(data):
raise ValueError(
f"Shape of passed values is ({len(index)}, {len(data)}), "
f"indices imply ({len(index)}, {len(columns)})."
)
self.columns = columns
self._data.rangeindex = isinstance(
columns, (range, pd.RangeIndex, cudf.RangeIndex)
)
self._data.label_dtype = getattr(columns, "dtype", None)
@_cudf_nvtx_annotate
def _init_from_dict_like(
self, data, index=None, columns=None, nan_as_null=None
):
label_dtype = None
if columns is not None:
label_dtype = getattr(columns, "dtype", None)
# remove all entries in data that are not in columns,
# inserting new empty columns for entries in columns that
# are not in data
if any(c in data for c in columns):
# Let the downstream logic determine the length of the
# empty columns here
empty_column = lambda: None # noqa: E731
else:
# None of the keys in `data` match the requested
# columns, so we need to create an empty DataFrame. To
# match pandas, the size of the dataframe must match
# the provided index, so we need to return a masked
# array of nulls if an index is given.
empty_column = functools.partial(
cudf.core.column.column_empty,
row_count=(0 if index is None else len(index)),
dtype=None,
masked=index is not None,
)
data = {
c: data[c] if c in data else empty_column() for c in columns
}
data, index = self._align_input_series_indices(data, index=index)
if index is None:
num_rows = 0
if data:
keys, values, lengths = zip(
*(
(k, v, 1)
if is_scalar(v)
else (
k,
vc := as_column(v, nan_as_null=nan_as_null),
len(vc),
)
for k, v in data.items()
)
)
data = dict(zip(keys, values))
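# Scalar values contribute a length of 1 and are broadcast
# to the common length when inserted; all remaining columns
# must share a single length, otherwise the unpacking below
# fails and we raise.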
try:
(num_rows,) = (set(lengths) - {1}) or {1}
except ValueError:
raise ValueError("All arrays must be the same length")
self._index = RangeIndex(0, num_rows)
else:
self._index = as_index(index)
if len(data):
self._data.multiindex = True
for i, col_name in enumerate(data):
self._data.multiindex = self._data.multiindex and isinstance(
col_name, tuple
)
self._insert(
i,
col_name,
data[col_name],
nan_as_null=nan_as_null,
)
self._data._level_names = (
tuple(columns.names)
if isinstance(columns, pd.Index)
else self._data._level_names
)
self._data.label_dtype = label_dtype
@classmethod
def _from_data(
cls,
data: MutableMapping,
index: Optional[BaseIndex] = None,
columns: Any = None,
) -> DataFrame:
out = super()._from_data(data=data, index=index)
if columns is not None:
out.columns = columns
return out
@staticmethod
@_cudf_nvtx_annotate
def _align_input_series_indices(data, index):
data = data.copy()
input_series = [
Series(val)
for val in data.values()
if isinstance(val, (pd.Series, Series, dict))
]
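# Align any Series-like inputs on a common index: reindex to
# the given index if one was provided, otherwise use the
# alignment produced across all of the input Series.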
if input_series:
if index is not None:
aligned_input_series = [
sr._align_to_index(index, how="right", sort=False)
for sr in input_series
]
else:
aligned_input_series = cudf.core.series._align_indices(
input_series
)
index = aligned_input_series[0].index
for name, val in data.items():
if isinstance(val, (pd.Series, Series, dict)):
data[name] = aligned_input_series.pop(0)
return data, index
# The `constructor*` properties are used by `dask` (and `dask_cudf`)
@property
def _constructor(self):
return DataFrame
@property
def _constructor_sliced(self):
return Series
@property
def _constructor_expanddim(self):
raise NotImplementedError(
"_constructor_expanddim not supported for DataFrames!"
)
def serialize(self):
header, frames = super().serialize()
header["index"], index_frames = self._index.serialize()
header["index_frame_count"] = len(index_frames)
# For backwards compatibility with older versions of cuDF, index
# columns are placed before data columns.
frames = index_frames + frames
return header, frames
@classmethod
def deserialize(cls, header, frames):
index_nframes = header["index_frame_count"]
obj = super().deserialize(
header, frames[header["index_frame_count"] :]
)
idx_typ = pickle.loads(header["index"]["type-serialized"])
index = idx_typ.deserialize(header["index"], frames[:index_nframes])
obj._index = index
return obj
@property
@_cudf_nvtx_annotate
def shape(self):
"""Returns a tuple representing the dimensionality of the DataFrame."""
return self._num_rows, self._num_columns
@property
def dtypes(self):
"""
Return the dtypes in this object.
Returns
-------
pandas.Series
The data type of each column.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> df = cudf.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df
float int datetime string
0 1.0 1 2018-03-10 foo
>>> df.dtypes
float float64
int int64
datetime datetime64[us]
string object
dtype: object
"""
return pd.Series(self._dtypes, dtype="object")
@property
def ndim(self):
"""Dimension of the data. DataFrame ndim is always 2."""
return 2
def __dir__(self):
# Add the columns of the DataFrame to the dir output.
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(
c
for c in self._column_names
if isinstance(c, str) and c.isidentifier()
)
return list(o)
def __setattr__(self, key, col):
try:
# Preexisting attributes may be set. We cannot rely on checking the
# `_PROTECTED_KEYS` because we must also allow for settable
# properties, and we must call object.__getattribute__ to bypass
# the `__getitem__` behavior inherited from `GetAttrGetItemMixin`.
object.__getattribute__(self, key)
except AttributeError:
if key not in self._PROTECTED_KEYS:
try:
# Check key existence.
self[key]
# If a column already exists, set it.
self[key] = col
return
except KeyError:
pass
# Set a new attribute that is not already a column.
super().__setattr__(key, col)
except RuntimeError as e:
# TODO: This allows setting properties that are marked as forbidden
# for internal usage. It is necessary because the __getattribute__
# call in the try block will trigger the error. We should see if
# setting these variables can also always be disabled
if "External-only API" not in str(e):
raise
super().__setattr__(key, col)
else:
super().__setattr__(key, col)
@_cudf_nvtx_annotate
def __getitem__(self, arg):
"""
If *arg* is a ``str`` or ``int`` type, return the column Series.
If *arg* is a ``slice``, return a new DataFrame with all columns
sliced to the specified range.
If *arg* is an ``array`` containing column names, return a new
DataFrame with the corresponding columns.
If *arg* is a ``dtype.bool array``, return the rows marked True
Examples
--------
>>> df = cudf.DataFrame({
... 'a': list(range(10)),
... 'b': list(range(10)),
... 'c': list(range(10)),
... })
Get first 4 rows of all columns.
>>> df[:4]
a b c
0 0 0 0
1 1 1 1
2 2 2 2
3 3 3 3
Get last 5 rows of all columns.
>>> df[-5:]
a b c
5 5 5 5
6 6 6 6
7 7 7 7
8 8 8 8
9 9 9 9
Get columns a and c.
>>> df[['a', 'c']]
a c
0 0 0
1 1 1
2 2 2
3 3 3
4 4 4
5 5 5
6 6 6
7 7 7
8 8 8
9 9 9
Return the rows specified in the boolean mask.
>>> df[[True, False, True, False, True,
... False, True, False, True, False]]
a b c
0 0 0 0
2 2 2 2
4 4 4 4
6 6 6 6
8 8 8 8
"""
if _is_scalar_or_zero_d_array(arg) or isinstance(arg, tuple):
return self._get_columns_by_label(arg, downcast=True)
elif isinstance(arg, slice):
return self._slice(arg)
elif can_convert_to_column(arg):
mask = arg
if is_list_like(mask):
# An explicit dtype is needed to avoid pandas warnings from
# empty sets of columns. This shouldn't be needed in pandas
# 2.0; there we don't need to specify a dtype when we know
# we're not trying to match any columns, so the default is fine.
dtype = None
if len(mask) == 0:
assert Version(pd.__version__) < Version("2.0.0")
dtype = "float64"
mask = pd.Series(mask, dtype=dtype)
if mask.dtype == "bool":
return self._apply_boolean_mask(BooleanMask(mask, len(self)))
else:
return self._get_columns_by_label(mask)
elif isinstance(arg, DataFrame):
return self.where(arg)
else:
raise TypeError(
f"__getitem__ on type {type(arg)} is not supported"
)
@_cudf_nvtx_annotate
def __setitem__(self, arg, value):
"""Add/set column by *arg or DataFrame*"""
if isinstance(arg, DataFrame):
# not handling set_item where arg = df & value = df
if isinstance(value, DataFrame):
raise TypeError(
f"__setitem__ with arg = {type(value)} and "
f"value = {type(arg)} is not supported"
)
else:
for col_name in self._data:
scatter_map = arg._data[col_name]
if is_scalar(value):
self._data[col_name][scatter_map] = value
else:
self._data[col_name][scatter_map] = column.as_column(
value
)[scatter_map]
elif is_scalar(arg) or isinstance(arg, tuple):
if isinstance(value, DataFrame):
_setitem_with_dataframe(
input_df=self,
replace_df=value,
input_cols=[arg],
mask=None,
)
else:
if arg in self._data:
if not is_scalar(value) and len(self) == 0:
if isinstance(value, (pd.Series, Series)):
self._index = as_index(value.index)
elif len(value) > 0:
self._index = RangeIndex(start=0, stop=len(value))
value = column.as_column(value)
new_data = self._data.__class__()
for key in self._data:
if key == arg:
new_data[key] = value
else:
new_data[key] = column.column_empty_like(
self._data[key],
masked=True,
newsize=len(value),
)
self._data = new_data
return
elif isinstance(value, (pd.Series, Series)):
value = Series(value)._align_to_index(
self._index,
how="right",
sort=False,
allow_non_unique=True,
)
if is_scalar(value):
self._data[arg] = column.full(len(self), value)
else:
value = as_column(value)
self._data[arg] = value
else:
# Discrepancy with pandas here:
# pandas raises a KeyError here
self.insert(len(self._data), arg, value)
elif can_convert_to_column(arg):
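# A list-like `arg` is treated as a boolean row mask when its
# dtype is bool; otherwise it is interpreted as a collection
# of column labels (handled in the else branch below).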
mask = arg
if is_list_like(mask):
mask = np.array(mask)
if mask.dtype == "bool":
mask = column.as_column(arg)
if isinstance(value, DataFrame):
_setitem_with_dataframe(
input_df=self,
replace_df=value,
input_cols=None,
mask=mask,
)
else:
if not is_scalar(value):
value = column.as_column(value)[mask]
for col_name in self._data:
self._data[col_name][mask] = value
else:
if isinstance(value, (cupy.ndarray, np.ndarray)):
_setitem_with_dataframe(
input_df=self,
replace_df=cudf.DataFrame(value),
input_cols=arg,
mask=None,
ignore_index=True,
)
elif isinstance(value, DataFrame):
_setitem_with_dataframe(
input_df=self,
replace_df=value,
input_cols=arg,
mask=None,
)
else:
for col in arg:
if is_scalar(value):
self._data[col] = column.full(
size=len(self), fill_value=value
)
else:
self._data[col] = column.as_column(value)
else:
raise TypeError(
f"__setitem__ on type {type(arg)} is not supported"
)
def __delitem__(self, name):
self._drop_column(name)
@_cudf_nvtx_annotate
def memory_usage(self, index=True, deep=False):
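"""
Return the memory usage of each column in bytes.
Parameters
----------
index : bool, default True
Include the memory usage of the index as an additional
"Index" entry.
deep : bool, default False
Accepted for pandas compatibility; it is not used by the
computation below.
Returns
-------
Series
The memory usage of each column in bytes.
"""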
mem_usage = [col.memory_usage for col in self._data.columns]
names = [str(name) for name in self._data.names]
if index:
mem_usage.append(self._index.memory_usage())
names.append("Index")
return Series._from_data(
data={None: as_column(mem_usage)},
index=as_index(names),
)
@_cudf_nvtx_annotate
def __array_function__(self, func, types, args, kwargs):
if "out" in kwargs or not all(
issubclass(t, (Series, DataFrame)) for t in types
):
return NotImplemented
try:
if func.__name__ in {"any", "all"}:
# NumPy default for `axis` is
# different from `cudf`/`pandas`
# hence need this special handling.
kwargs.setdefault("axis", None)
if cudf_func := getattr(self.__class__, func.__name__, None):
out = cudf_func(*args, **kwargs)
# The dot product of two DataFrames returns an array in pandas.
if (
func is np.dot
and isinstance(args[0], (DataFrame, pd.DataFrame))
and isinstance(args[1], (DataFrame, pd.DataFrame))
):
return out.values
return out
except Exception:
# The rare instance where a "silent" failure is preferable. Except
# in the (highly unlikely) case that some other library
# interoperates with cudf objects, the result will be that numpy
# raises a TypeError indicating that the operation is not
# implemented, which is much friendlier than an arbitrary internal
# cudf error.
pass
return NotImplemented
# The _get_numeric_data method is necessary for dask compatibility.
@_cudf_nvtx_annotate
def _get_numeric_data(self):
"""Return a dataframe with only numeric data types"""
columns = [
c
for c, dt in self.dtypes.items()
if dt != object and not is_categorical_dtype(dt)
]
return self[columns]
@_cudf_nvtx_annotate
def assign(self, **kwargs: Union[Callable[[Self], Any], Any]):
"""
Assign columns to DataFrame from keyword arguments.
Parameters
----------
**kwargs: dict mapping string column names to values
The value for each key can either be a literal column (or
something that can be converted to a column), or
a callable of one argument that will be given the
dataframe as an argument and should return the new column
(without modifying the input argument).
Columns are added in-order, so callables can refer to
column names constructed in the assignment.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame()
>>> df = df.assign(a=[0, 1, 2], b=[3, 4, 5])
>>> df
a b
0 0 3
1 1 4
2 2 5
"""
new_df = self.copy(deep=False)
for k, v in kwargs.items():
new_df[k] = v(new_df) if callable(v) else v
return new_df
@classmethod
@_cudf_nvtx_annotate
def _concat(
cls, objs, axis=0, join="outer", ignore_index=False, sort=False
):
# flag to indicate at least one empty input frame also has an index
empty_has_index = False
# length of output frame's RangeIndex if all input frames are empty,
# and at least one has an index
result_index_length = 0
# the number of empty input frames
num_empty_input_frames = 0
# flag to indicate if all DataFrame's have
# RangeIndex as their index
are_all_range_index = False
for i, obj in enumerate(objs):
# shallow-copy the input DFs in case the same DF instance
# is concatenated with itself
objs[i] = obj.copy(deep=False)
# If ignore_index is true, determine if
# all or some objs are empty(and have index).
# 1. If all objects are empty(and have index), we
# should set the index separately using RangeIndex.
# 2. If some objects are empty(and have index), we
# create empty columns later while populating `columns`
# variable. Detailed explanation of second case before
# allocation of `columns` variable below.
if ignore_index and obj.empty:
num_empty_input_frames += 1
result_index_length += len(obj)
empty_has_index = empty_has_index or len(obj) > 0
are_all_range_index = (
True if i == 0 else are_all_range_index
) and isinstance(obj.index, cudf.RangeIndex)
if join == "inner":
sets_of_column_names = [set(obj._column_names) for obj in objs]
intersecting_columns = functools.reduce(
set.intersection, sets_of_column_names
)
union_of_columns = functools.reduce(
set.union, sets_of_column_names
)
non_intersecting_columns = union_of_columns.symmetric_difference(
intersecting_columns
)
# Get an ordered list of the intersecting columns to preserve input
# order, which is promised by pandas for inner joins.
ordered_intersecting_columns = [
name
for obj in objs
for name in obj._column_names
if name in intersecting_columns
]
names = dict.fromkeys(ordered_intersecting_columns).keys()
if axis == 0:
if ignore_index and (
num_empty_input_frames > 0
or len(intersecting_columns) == 0
):
# When ignore_index is True and if there is
# at least 1 empty dataframe and no
# intersecting columns are present, an empty dataframe
# needs to be returned just with an Index.
empty_has_index = True
num_empty_input_frames = len(objs)
result_index_length = sum(len(obj) for obj in objs)
# remove columns not present in all objs
for obj in objs:
obj.drop(
columns=non_intersecting_columns,
inplace=True,
errors="ignore",
)
elif join == "outer":
# Get a list of the unique table column names
names = [name for f in objs for name in f._column_names]
names = dict.fromkeys(names).keys()
else:
raise ValueError(
"Only can inner (intersect) or outer (union) when joining"
"the other axis"
)
if sort:
try:
# Sorted always returns a list, but will fail to sort if names
# include different types that are not comparable.
names = sorted(names)
except TypeError:
# For pandas compatibility, we also try to handle the case
# where some column names are strings and others are ints.
# Just assume that everything that isn't a str is numerical;
# we can't sort anything else.
try:
str_names = sorted(n for n in names if isinstance(n, str))
non_str_names = sorted(
n for n in names if not isinstance(n, str)
)
names = non_str_names + str_names
except TypeError:
names = list(names)
else:
names = list(names)
# Combine the index and table columns for each Frame into a list of
# [...index_cols, ...table_cols].
#
# If any of the input frames have a non-empty index, include these
# columns in the list of columns to concatenate, even if the input
# frames are empty and `ignore_index=True`.
columns = [
(
[]
if are_all_range_index
or (ignore_index and not empty_has_index)
else list(f._index._data.columns)
)
+ [f._data[name] if name in f._data else None for name in names]
for f in objs
]
# Get a list of the combined index and table column indices
indices = list(range(functools.reduce(max, map(len, columns))))
# The position of the first table column in each
# combined index + table columns list
first_data_column_position = len(indices) - len(names)
# Get the non-null columns and their dtypes
non_null_cols, dtypes = _get_non_null_cols_and_dtypes(indices, columns)
# Infer common dtypes between numeric columns
# and combine CategoricalColumn categories
categories = _find_common_dtypes_and_categories(non_null_cols, dtypes)
# Cast all columns to a common dtype, assign combined categories,
# and back-fill missing columns with all-null columns
_cast_cols_to_common_dtypes(indices, columns, dtypes, categories)
# Construct input tables with the index and data columns in the same
# order. This strips the given index/column names and replaces the
# names with their integer positions in the `cols` list
tables = []
for cols in columns:
table_index = None
if first_data_column_position == 1:
table_index = cudf.core.index.as_index(cols[0])
elif first_data_column_position > 1:
table_index = DataFrame._from_data(
data=dict(
zip(
indices[:first_data_column_position],
cols[:first_data_column_position],
)
)
)
tables.append(
DataFrame._from_data(
data=dict(
zip(
indices[first_data_column_position:],
cols[first_data_column_position:],
)
),
index=table_index,
)
)
# Concatenate the Tables
out = cls._from_data(
*libcudf.concat.concat_tables(
tables, ignore_index=ignore_index or are_all_range_index
)
)
# If ignore_index is True, all input frames are empty, and at
# least one input frame has an index, assign a new RangeIndex
# to the result frame.
if empty_has_index and num_empty_input_frames == len(objs):
out._index = cudf.RangeIndex(result_index_length)
elif are_all_range_index and not ignore_index:
out._index = cudf.core.index.GenericIndex._concat(
[o._index for o in objs]
)
# Reassign the categories for any categorical table cols
_reassign_categories(
categories, out._data, indices[first_data_column_position:]
)
# Reassign the categories for any categorical index cols
if not isinstance(out._index, cudf.RangeIndex):
_reassign_categories(
categories,
out._index._data,
indices[:first_data_column_position],
)
if not isinstance(out._index, MultiIndex) and is_categorical_dtype(
out._index._values.dtype
):
out = out.set_index(
cudf.core.index.as_index(out.index._values)
)
for name, col in out._data.items():
out._data[name] = col._with_type_metadata(
tables[0]._data[name].dtype
)
# Reassign index and column names
if objs[0]._data.multiindex:
out._set_column_names_like(objs[0])
else:
out.columns = names
if not ignore_index:
out._index.name = objs[0]._index.name
out._index.names = objs[0]._index.names
return out
def astype(self, dtype, copy=False, errors="raise", **kwargs):
if is_dict_like(dtype):
if len(set(dtype.keys()) - set(self._data.names)) > 0:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
else:
dtype = {cc: dtype for cc in self._data.names}
return super().astype(dtype, copy, errors, **kwargs)
def _clean_renderable_dataframe(self, output):
"""
This method takes in a partial/preprocessed dataframe
and returns a correct representation of it with the
correct dimensions (rows x columns).
"""
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
output = output.to_pandas().to_string(
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
lines = output.split("\n")
if lines[-1].startswith("["):
lines = lines[:-1]
lines.append(
"[%d rows x %d columns]" % (len(self), len(self._data.names))
)
return "\n".join(lines)
def _clean_nulls_from_dataframe(self, df):
"""
This function converts all ``null`` values to ``<NA>`` for
representation as a string in `__repr__`.
Since we use the pandas `__repr__` throughout our code
for formatting purposes, we convert columns to `str` dtype
before filling them with `<NA>` values.
"""
for col in df._data:
if is_list_dtype(df._data[col]) or is_struct_dtype(df._data[col]):
# TODO we need to handle this
pass
elif df._data[col].has_nulls():
fill_value = (
str(cudf.NaT)
if isinstance(
df._data[col],
(
cudf.core.column.DatetimeColumn,
cudf.core.column.TimeDeltaColumn,
),
)
else str(cudf.NA)
)
df[col] = df._data[col].astype("str").fillna(fill_value)
else:
df[col] = df._data[col]
return df
def _get_renderable_dataframe(self):
"""
Takes the row and column limits from the pandas display settings
(or estimates them from the size), pulls quadrants based on those
parameters, and adjusts for a MultiIndex, producing an efficient,
representative string for printing the dataframe.
"""
max_rows = pd.options.display.max_rows
nrows = np.max([len(self) if max_rows is None else max_rows, 1])
if pd.options.display.max_rows == 0:
nrows = len(self)
ncols = (
pd.options.display.max_columns
if pd.options.display.max_columns
else pd.options.display.width / 2
)
if len(self) <= nrows and len(self._data.names) <= ncols:
output = self.copy(deep=False)
elif self.empty and len(self.index) > 0:
max_seq_items = pd.options.display.max_seq_items
# In case of Empty DataFrame with index, Pandas prints
# first `pd.options.display.max_seq_items` index values
# followed by ... To obtain ... at the end of index list,
# adding 1 extra value.
# If `pd.options.display.max_seq_items` is None,
# entire sequence/Index is to be printed.
# Note : Pandas truncates the dimensions at the end of
# the resulting dataframe when `display.show_dimensions`
# is set to truncate. Hence to display the dimensions we
# need to extract maximum of `max_seq_items` and `nrows`
# and have 1 extra value for ... to show up in the output
# string.
if max_seq_items is not None:
output = self.head(max(max_seq_items, nrows) + 1)
else:
output = self.copy(deep=False)
else:
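# The frame is too large to render fully: take the top and
# bottom rows and the left- and right-most columns, then
# stitch the four quadrants together for display.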
left_cols = len(self._data.names)
right_cols = 0
upper_rows = len(self)
lower_rows = 0
if len(self) > nrows and nrows > 0:
upper_rows = int(nrows / 2.0) + 1
lower_rows = upper_rows + (nrows % 2)
if len(self._data.names) > ncols:
right_cols = len(self._data.names) - int(ncols / 2.0)
# adjust right columns for output if multiindex.
right_cols = (
right_cols - 1
if isinstance(self.index, MultiIndex)
else right_cols
)
left_cols = int(ncols / 2.0) + 1
if right_cols > 0:
# Pick ncols - left_cols number of columns
# from the right side/from the end.
right_cols = -(int(ncols) - left_cols + 1)
else:
# If right_cols is 0 or negative, it means
# self has fewer columns than ncols.
# Hence assign len(self._data.names) which
# will result in empty `*_right` quadrants.
# This is because `*_left` quadrants will
# contain all columns.
right_cols = len(self._data.names)
upper_left = self.head(upper_rows).iloc[:, :left_cols]
upper_right = self.head(upper_rows).iloc[:, right_cols:]
lower_left = self.tail(lower_rows).iloc[:, :left_cols]
lower_right = self.tail(lower_rows).iloc[:, right_cols:]
upper = cudf.concat([upper_left, upper_right], axis=1)
lower = cudf.concat([lower_left, lower_right], axis=1)
output = cudf.concat([upper, lower])
output = self._clean_nulls_from_dataframe(output)
output._index = output._index._clean_nulls_from_index()
return output
@_cudf_nvtx_annotate
def __repr__(self):
output = self._get_renderable_dataframe()
return self._clean_renderable_dataframe(output)
@_cudf_nvtx_annotate
def _repr_html_(self):
lines = (
self._get_renderable_dataframe()
.to_pandas()
._repr_html_()
.split("\n")
)
if lines[-2].startswith("<p>"):
lines = lines[:-2]
lines.append(
"<p>%d rows × %d columns</p>"
% (len(self), len(self._data.names))
)
lines.append("</div>")
return "\n".join(lines)
@_cudf_nvtx_annotate
def _repr_latex_(self):
return self._get_renderable_dataframe().to_pandas()._repr_latex_()
@_cudf_nvtx_annotate
def _get_columns_by_label(
self, labels, *, downcast=False
) -> Self | Series:
"""
Return columns of dataframe by `labels`
If downcast is True, try to downcast from a DataFrame to a Series.
"""
ca = self._data.select_by_label(labels)
if downcast:
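# Downcast to a Series only when the label selects a single
# column: a scalar label for flat columns, or a tuple spanning
# every level of a MultiIndex column.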
if is_scalar(labels):
nlevels = 1
elif isinstance(labels, tuple):
nlevels = len(labels)
if self._data.multiindex is False or nlevels == self._data.nlevels:
out = self._constructor_sliced._from_data(
ca, index=self.index, name=labels
)
return out
out = self.__class__._from_data(
ca, index=self.index, columns=ca.to_pandas_index()
)
return out
def _make_operands_and_index_for_binop(
self,
other: Any,
fn: str,
fill_value: Any = None,
reflect: bool = False,
can_reindex: bool = False,
*args,
**kwargs,
) -> Tuple[
Union[
Dict[Optional[str], Tuple[ColumnBase, Any, bool, Any]],
NotImplementedType,
],
Optional[BaseIndex],
bool,
]:
lhs, rhs = self._data, other
index = self._index
fill_requires_key = False
left_default: Any = False
equal_columns = False
can_use_self_column_name = True
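# Build a mapping of column label -> (left column, right
# operand, reflect, fill value) for the binop machinery, along
# with the result index and whether the caller may reuse
# self's column names.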
if _is_scalar_or_zero_d_array(other):
rhs = {name: other for name in self._data}
equal_columns = True
elif isinstance(other, Series):
rhs = dict(zip(other.index.to_pandas(), other.values_host))
# For keys in right but not left, perform binops between NaN (not
# NULL!) and the right value (result is NaN).
left_default = as_column(np.nan, length=len(self))
equal_columns = other.index.to_pandas().equals(
self._data.to_pandas_index()
)
can_use_self_column_name = (
equal_columns
or list(other._index._data.names) == self._data._level_names
)
elif isinstance(other, DataFrame):
if (
not can_reindex
and fn in cudf.utils.utils._EQUALITY_OPS
and (
not self.index.equals(other.index)
or not self._data.to_pandas_index().equals(
other._data.to_pandas_index()
)
)
):
raise ValueError(
"Can only compare identically-labeled DataFrame objects"
)
new_lhs, new_rhs = _align_indices(self, other)
index = new_lhs._index
lhs, rhs = new_lhs._data, new_rhs._data
fill_requires_key = True
# For DataFrame-DataFrame ops, always default to operating against
# the fill value.
left_default = fill_value
equal_columns = self._column_names == other._column_names
can_use_self_column_name = (
equal_columns
or self._data._level_names == other._data._level_names
)
elif isinstance(other, (dict, abc.Mapping)):
# Need to fail early on host mapping types because we ultimately
# convert everything to a dict.
return NotImplemented, None, True
if not isinstance(rhs, (dict, abc.Mapping)):
return NotImplemented, None, True
operands = {
k: (
v,
rhs.get(k, fill_value),
reflect,
fill_value if (not fill_requires_key or k in rhs) else None,
)
for k, v in lhs.items()
}
if left_default is not False:
for k, v in rhs.items():
if k not in lhs:
operands[k] = (left_default, v, reflect, None)
if not equal_columns:
if isinstance(other, DataFrame):
column_names_list = self._data.to_pandas_index().join(
other._data.to_pandas_index(), how="outer"
)
elif isinstance(other, Series):
column_names_list = self._data.to_pandas_index().join(
other.index.to_pandas(), how="outer"
)
else:
raise ValueError("other must be a DataFrame or Series.")
sorted_dict = {key: operands[key] for key in column_names_list}
return sorted_dict, index, can_use_self_column_name
return operands, index, can_use_self_column_name
@classmethod
@_cudf_nvtx_annotate
def from_dict(
cls,
data: dict,
orient: str = "columns",
dtype: Optional[Dtype] = None,
columns: Optional[list] = None,
) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index', 'tight'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
If 'tight', assume a dict with keys ['index', 'columns', 'data',
'index_names', 'column_names'].
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ``ValueError``
if used with ``orient='columns'`` or ``orient='tight'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
DataFrame.to_dict : Convert the DataFrame to a dictionary.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> import cudf
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> cudf.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 11, 12, 13]}
>>> cudf.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 10 11 12 13
When using the 'index' orientation, the column names can be
specified manually:
>>> cudf.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 10 11 12 13
Specify ``orient='tight'`` to create the DataFrame using a 'tight'
format:
>>> data = {'index': [('a', 'b'), ('a', 'c')],
... 'columns': [('x', 1), ('y', 2)],
... 'data': [[1, 3], [2, 4]],
... 'index_names': ['n1', 'n2'],
... 'column_names': ['z1', 'z2']}
>>> cudf.DataFrame.from_dict(data, orient='tight')
z1 x y
z2 1 2
n1 n2
a b 1 3
c 2 4
""" # noqa: E501
orient = orient.lower()
if orient == "index":
if len(data) > 0 and isinstance(
next(iter(data.values())), (cudf.Series, cupy.ndarray)
):
result = cls(data).T
result.columns = columns
if dtype is not None:
result = result.astype(dtype)
return result
else:
return cls.from_pandas(
pd.DataFrame.from_dict(
data=data,
orient=orient,
dtype=dtype,
columns=columns,
)
)
elif orient == "columns":
if columns is not None:
raise ValueError(
"Cannot use columns parameter with orient='columns'"
)
return cls(data, columns=None, dtype=dtype)
elif orient == "tight":
if columns is not None:
raise ValueError(
"Cannot use columns parameter with orient='right'"
)
index = _from_dict_create_index(
data["index"], data["index_names"], cudf
)
columns = _from_dict_create_index(
data["columns"], data["column_names"], pd
)
return cls(data["data"], index=index, columns=columns, dtype=dtype)
else:
raise ValueError(
"Expected 'index', 'columns' or 'tight' for orient "
f"parameter. Got '{orient}' instead"
)
@_cudf_nvtx_annotate
def to_dict(
self,
orient: str = "dict",
into: type[dict] = dict,
) -> dict | list[dict]:
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'tight', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'tight' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values],
'index_names' -> [index.names], 'column_names' -> [column.names]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
>>> df.to_dict('tight')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
""" # noqa: E501
orient = orient.lower()
if orient == "series":
# Special case needed to avoid converting
# cudf.Series objects into pd.Series
if not inspect.isclass(into):
cons = type(into) # type: ignore[assignment]
if isinstance(into, defaultdict):
cons = functools.partial(cons, into.default_factory)
elif issubclass(into, abc.Mapping):
cons = into # type: ignore[assignment]
if issubclass(into, defaultdict):
raise TypeError(
"to_dict() only accepts initialized defaultdicts"
)
else:
raise TypeError(f"unsupported type: {into}")
return cons(self.items()) # type: ignore[misc]
return self.to_pandas().to_dict(orient=orient, into=into)
@_cudf_nvtx_annotate
def scatter_by_map(
self, map_index, map_size=None, keep_index=True, **kwargs
):
"""Scatter to a list of dataframes.
Uses map_index to determine the destination
of each row of the original DataFrame.
Parameters
----------
map_index : Series, str or list-like
Scatter assignment for each row
map_size : int
Length of output list. Must be >= the number of unique values in map_index.
keep_index : bool
Conserve original index values for each row
Returns
-------
A list of cudf.DataFrame objects.
Raises
------
ValueError
If the map_index has invalid entries (not all in [0,
num_partitions)).
"""
# map_index might be a column name or array,
# make it a Column
if isinstance(map_index, str):
map_index = self._data[map_index]
elif isinstance(map_index, cudf.Series):
map_index = map_index._column
else:
map_index = as_column(map_index)
# Convert float to integer
if map_index.dtype.kind == "f":
map_index = map_index.astype(np.int32)
# Convert string or categorical to integer
if isinstance(map_index, cudf.core.column.StringColumn):
map_index = map_index.as_categorical_column(
"category"
).as_numerical
warnings.warn(
"Using StringColumn for map_index in scatter_by_map. "
"Use an integer array/column for better performance."
)
elif isinstance(map_index, cudf.core.column.CategoricalColumn):
map_index = map_index.as_numerical
warnings.warn(
"Using CategoricalColumn for map_index in scatter_by_map. "
"Use an integer array/column for better performance."
)
if kwargs.get("debug", False) == 1 and map_size is not None:
count = map_index.distinct_count()
if map_size < count:
raise ValueError(
f"ERROR: map_size must be >= {count} (got {map_size})."
)
partitioned_columns, output_offsets = libcudf.partitioning.partition(
[*(self._index._columns if keep_index else ()), *self._columns],
map_index,
map_size,
)
partitioned = self._from_columns_like_self(
partitioned_columns,
column_names=self._column_names,
index_names=self._index_names if keep_index else None,
)
# due to the split limitation mentioned
# here: https://github.com/rapidsai/cudf/issues/4607
# we need to remove first & last elements in offsets.
# TODO: Remove this after the above issue is fixed.
output_offsets = output_offsets[1:-1]
result = partitioned._split(output_offsets, keep_index=keep_index)
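# When map_size is given, pad with empty frames so the output
# list has exactly map_size entries, even when some partitions
# are empty.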
if map_size:
result += [
self._empty_like(keep_index)
for _ in range(map_size - len(result))
]
return result
@_cudf_nvtx_annotate
def update(
self,
other,
join="left",
overwrite=True,
filter_func=None,
errors="ignore",
):
"""
Modify a DataFrame in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label with the
original DataFrame. If a Series is passed, its name attribute must
be set, and that will be used as the column name to align with the
original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and
columns of the original object.
overwrite : {True, False}, default True
How to handle non-NA values for overlapping keys:
True: overwrite original DataFrame's values with values from other.
False: only update values that are NA in the original DataFrame.
filter_func : None
filter_func is not supported yet
Return True for values that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and other
both contain non-NA data in the same place.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
- When ``errors`` = 'raise' and there's overlapping non-NA data.
- When ``errors`` is not either 'ignore' or 'raise'
NotImplementedError
- If ``join`` != 'left'
"""
# TODO: Support other joins
if join != "left":
raise NotImplementedError("Only left join is supported")
if errors not in {"ignore", "raise"}:
raise ValueError(
"The parameter errors must be either 'ignore' or 'raise'"
)
if filter_func is not None:
raise NotImplementedError("filter_func is not supported yet")
if not isinstance(other, DataFrame):
other = DataFrame(other)
self_cols = self._data.to_pandas_index()
if not self_cols.equals(other._data.to_pandas_index()):
other = other.reindex(self_cols, axis=1)
if not self.index.equals(other.index):
other = other.reindex(self.index, axis=0)
source_df = self.copy(deep=False)
for col in source_df._column_names:
this = source_df[col]
that = other[col]
if errors == "raise":
mask_this = that.notna()
mask_that = this.notna()
if (mask_this & mask_that).any():
raise ValueError("Data overlaps.")
if overwrite:
mask = that.isna()
else:
mask = this.notna()
# don't overwrite columns unnecessarily
if mask.all():
continue
source_df[col] = source_df[col].where(mask, that)
self._mimic_inplace(source_df, inplace=True)
@_cudf_nvtx_annotate
def __iter__(self):
return iter(self._column_names)
@_cudf_nvtx_annotate
def __contains__(self, item):
# This must check against containment in the pandas Index and not
# self._column_names to handle NA, None, nan, etc. correctly.
return item in self._data.to_pandas_index()
@_cudf_nvtx_annotate
def items(self):
"""Iterate over column names and series pairs"""
for k in self:
yield (k, self[k])
@_cudf_nvtx_annotate
def equals(self, other, **kwargs):
ret = super().equals(other)
# If all other checks matched, validate names.
if ret:
for self_name, other_name in zip(
self._data.names, other._data.names
):
if self_name != other_name:
ret = False
break
return ret
@property
def iat(self):
"""
Alias for ``DataFrame.iloc``; provided for compatibility with Pandas.
"""
return self.iloc
@property
def at(self):
"""
Alias for ``DataFrame.loc``; provided for compatibility with Pandas.
"""
return self.loc
@property # type: ignore
@_external_only_api(
"Use _column_names instead, or _data.to_pandas_index() if a pandas "
"index is absolutely necessary. For checking if the columns are a "
"MultiIndex, use _data.multiindex."
)
@_cudf_nvtx_annotate
def columns(self):
"""Returns a tuple of columns"""
return self._data.to_pandas_index()
@columns.setter # type: ignore
@_cudf_nvtx_annotate
def columns(self, columns):
if isinstance(columns, cudf.BaseIndex):
columns = columns.to_pandas()
if columns is None:
columns = pd.Index(range(len(self._data.columns)))
is_multiindex = isinstance(columns, pd.MultiIndex)
if isinstance(columns, (Series, cudf.Index, ColumnBase)):
columns = pd.Index(columns.to_numpy(), tupleize_cols=is_multiindex)
elif not isinstance(columns, pd.Index):
columns = pd.Index(columns, tupleize_cols=is_multiindex)
if not len(columns) == len(self._data.names):
raise ValueError(
f"Length mismatch: expected {len(self._data.names)} elements, "
f"got {len(columns)} elements"
)
self._set_column_names(columns, is_multiindex, columns.names)
def _set_column_names(self, names, multiindex=False, level_names=None):
data = dict(zip(names, self._data.columns))
if len(names) != len(data):
raise ValueError("Duplicate column names are not allowed")
self._data = ColumnAccessor(
data,
multiindex=multiindex,
level_names=level_names,
)
def _set_column_names_like(self, other):
self._set_column_names(
other._data.names, other._data.multiindex, other._data.level_names
)
@_cudf_nvtx_annotate
def reindex(
self,
labels=None,
index=None,
columns=None,
axis=None,
method=None,
copy=True,
level=None,
fill_value=NA,
limit=None,
tolerance=None,
):
"""
Conform DataFrame to new index. Places NA/NaN in locations
having no value in the previous index. A new object is produced
unless the new index is equivalent to the current one and copy=False.
Parameters
----------
labels : Index, Series-convertible, optional, default None
New labels / index to conform the axis specified by ``axis`` to.
index : Index, Series-convertible, optional, default None
The index labels specifying the index to conform to.
columns : array-like, optional, default None
The column labels specifying the columns to conform to.
axis : Axis to target.
Can be either the axis name
(``index``, ``columns``) or number (0, 1).
method : Not supported
copy : boolean, default True
Return a new object, even if the passed indexes are the same.
level : Not supported
fill_value : Value to use for missing values.
Defaults to ``NA``, but can be any "compatible" value.
limit : Not supported
tolerance : Not supported
Returns
-------
DataFrame with changed index.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We _highly_ recommend using keyword arguments to clarify your intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = cudf.DataFrame({'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
>>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404 0.07
Iceweasel <NA> <NA>
Comodo Dragon <NA> <NA>
IE10 404 0.08
Chrome 200 0.02
.. pandas-compat::
**DataFrame.reindex**
Note: One difference from Pandas is that ``NA`` is used for rows
that do not match, rather than ``NaN``. One side effect of this is
that the column ``http_status`` retains an integer dtype in cuDF
where it is cast to float in Pandas.
We can fill in the missing values by
passing a value to the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 <NA>
Chrome 200 <NA>
Safari 404 <NA>
IE10 404 <NA>
Konqueror 301 <NA>
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 <NA>
Chrome 200 <NA>
Safari 404 <NA>
IE10 404 <NA>
Konqueror 301 <NA>
"""
if labels is None and index is None and columns is None:
return self.copy(deep=copy)
# pandas simply ignores the labels keyword if it is provided in
# addition to index and columns, but it prohibits the axis arg.
if (index is not None or columns is not None) and axis is not None:
raise TypeError(
"Cannot specify both 'axis' and any of 'index' or 'columns'."
)
axis = 0 if axis is None else self._get_axis_from_axis_arg(axis)
if axis == 0:
if index is None:
index = labels
else:
if columns is None:
columns = labels
df = (
self
if columns is None
else self[list(set(self._column_names) & set(columns))]
)
return df._reindex(
column_names=columns,
dtypes=self._dtypes,
deep=copy,
index=index,
inplace=False,
fill_value=fill_value,
)
@_cudf_nvtx_annotate
def set_index(
self,
keys,
drop=True,
append=False,
inplace=False,
verify_integrity=False,
):
"""Return a new DataFrame with a new index
Parameters
----------
keys : Index, Series-convertible, label-like, or list
Index : the new index.
Series-convertible : values for the new index.
Label-like : Label of column to be used as index.
List : List of items from above.
drop : boolean, default True
Whether to drop corresponding column for str index argument
append : boolean, default False
Whether to append columns to the existing index,
resulting in a MultiIndex.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : boolean, default False
Check for duplicates in the new index.
Examples
--------
>>> df = cudf.DataFrame({
... "a": [1, 2, 3, 4, 5],
... "b": ["a", "b", "c", "d","e"],
... "c": [1.0, 2.0, 3.0, 4.0, 5.0]
... })
>>> df
a b c
0 1 a 1.0
1 2 b 2.0
2 3 c 3.0
3 4 d 4.0
4 5 e 5.0
Set the index to become the 'b' column:
>>> df.set_index('b')
a c
b
a 1 1.0
b 2 2.0
c 3 3.0
d 4 4.0
e 5 5.0
Create a MultiIndex using columns 'a' and 'b':
>>> df.set_index(["a", "b"])
c
a b
1 a 1.0
2 b 2.0
3 c 3.0
4 d 4.0
5 e 5.0
Set new Index instance as index:
>>> df.set_index(cudf.RangeIndex(10, 15))
a b c
10 1 a 1.0
11 2 b 2.0
12 3 c 3.0
13 4 d 4.0
14 5 e 5.0
Setting `append=True` will combine current index with column `a`:
>>> df.set_index("a", append=True)
b c
a
0 1 a 1.0
1 2 b 2.0
2 3 c 3.0
3 4 d 4.0
4 5 e 5.0
`set_index` supports `inplace` parameter too:
>>> df.set_index("a", inplace=True)
>>> df
b c
a
1 a 1.0
2 b 2.0
3 c 3.0
4 d 4.0
5 e 5.0
"""
if not isinstance(keys, list):
keys = [keys]
# Preliminary type check
col_not_found = []
columns_to_add = []
names = []
to_drop = []
for col in keys:
# Is column label
if is_scalar(col) or isinstance(col, tuple):
if col in self._column_names:
columns_to_add.append(self[col])
names.append(col)
if drop:
to_drop.append(col)
else:
col_not_found.append(col)
else:
# Try coerce into column
if not is_column_like(col):
try:
col = as_column(col)
except TypeError:
msg = f"{col} cannot be converted to column-like."
raise TypeError(msg)
if isinstance(col, (MultiIndex, pd.MultiIndex)):
col = (
cudf.from_pandas(col)
if isinstance(col, pd.MultiIndex)
else col
)
cols = [col._data[x] for x in col._data]
columns_to_add.extend(cols)
names.extend(col.names)
else:
if isinstance(col, (pd.RangeIndex, cudf.RangeIndex)):
# Corner case: a RangeIndex does not need to be instantiated as a column
columns_to_add.append(col)
else:
# For pandas obj, convert to gpu obj
columns_to_add.append(as_column(col))
if isinstance(
col, (cudf.Series, cudf.Index, pd.Series, pd.Index)
):
names.append(col.name)
else:
names.append(None)
if col_not_found:
raise KeyError(f"None of {col_not_found} are in the columns")
if append:
idx_cols = [self.index._data[x] for x in self.index._data]
if isinstance(self.index, MultiIndex):
idx_names = self.index.names
else:
idx_names = [self.index.name]
columns_to_add = idx_cols + columns_to_add
names = idx_names + names
if len(columns_to_add) == 0:
raise ValueError("No valid columns to be added to index.")
elif (
len(columns_to_add) == 1
and len(keys) == 1
and not isinstance(keys[0], (cudf.MultiIndex, pd.MultiIndex))
):
idx = cudf.Index(columns_to_add[0], name=names[0])
else:
idx = MultiIndex._from_data(
{i: col for i, col in enumerate(columns_to_add)}
)
idx.names = names
if not isinstance(idx, BaseIndex):
raise ValueError("Parameter index should be type `Index`.")
df = self if inplace else self.copy(deep=True)
if verify_integrity and not idx.is_unique:
raise ValueError(f"Values in Index are not unique: {idx}")
if to_drop:
df.drop(columns=to_drop, inplace=True)
df.index = idx
return df if not inplace else None
@_cudf_nvtx_annotate
def where(self, cond, other=None, inplace=False):
from cudf.core._internals.where import (
_check_and_cast_columns_with_other,
_make_categorical_like,
)
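# Illustrative usage sketch (not the upstream docstring): `where` keeps
# entries where `cond` is True and replaces the rest with `other`, e.g.
#   df = cudf.DataFrame({"A": [1, 4, 5], "B": [3, 5, 8]})
#   df.where(df % 2 == 0, None)   # odd entries become <NA>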
# First process the condition.
if isinstance(cond, Series):
cond = self._from_data_like_self(
{name: cond._column for name in self._column_names},
)
elif hasattr(cond, "__cuda_array_interface__"):
cond = DataFrame(
cond, columns=self._column_names, index=self.index
)
elif (
hasattr(cond, "__array_interface__")
and cond.__array_interface__["shape"] != self.shape
):
raise ValueError("conditional must be same shape as self")
elif not isinstance(cond, DataFrame):
cond = cudf.DataFrame(cond)
if set(self._column_names).intersection(set(cond._column_names)):
if not self.index.equals(cond.index):
cond = cond.reindex(self.index)
else:
if cond.shape != self.shape:
raise ValueError(
"Array conditional must be same shape as self"
)
# Setting `self` column names to `cond` as it has no column names.
cond._set_column_names_like(self)
# If other was provided, process that next.
if isinstance(other, DataFrame):
other_cols = [other._data[col] for col in self._column_names]
elif cudf.api.types.is_scalar(other):
other_cols = [other] * len(self._column_names)
elif isinstance(other, cudf.Series):
other_cols = other.to_pandas()
else:
other_cols = other
if len(self._columns) != len(other_cols):
raise ValueError(
"""Replacement list length or number of data columns
should be equal to number of columns of self"""
)
out = {}
for (name, col), other_col in zip(self._data.items(), other_cols):
col, other_col = _check_and_cast_columns_with_other(
source_col=col,
other=other_col,
inplace=inplace,
)
if cond_col := cond._data.get(name):
result = cudf._lib.copying.copy_if_else(
col, other_col, cond_col
)
out[name] = _make_categorical_like(result, self._data[name])
else:
out_mask = cudf._lib.null_mask.create_null_mask(
len(col),
state=cudf._lib.null_mask.MaskState.ALL_NULL,
)
out[name] = col.set_mask(out_mask)
return self._mimic_inplace(
self._from_data_like_self(out), inplace=inplace
)
@docutils.doc_apply(
doc_reset_index_template.format(
klass="DataFrame",
argument="",
return_type="DataFrame or None",
return_doc="",
example="""
>>> df = cudf.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal <NA>
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal <NA>
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal <NA>
You can also use ``reset_index`` with MultiIndex.
>>> index = cudf.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> df = cudf.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=('speed', 'type'))
>>> df
speed type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey <NA> jump
>>> df.reset_index(level='class')
class speed type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal <NA> jump
""",
)
)
def reset_index(
self, level=None, drop=False, inplace=False, col_level=0, col_fill=""
):
return self._mimic_inplace(
DataFrame._from_data(
*self._reset_index(
level=level,
drop=drop,
col_level=col_level,
col_fill=col_fill,
)
),
inplace=inplace,
)
@_cudf_nvtx_annotate
def insert(self, loc, name, value, nan_as_null=None):
"""Add a column to DataFrame at the index specified by loc.
Parameters
----------
loc : int
location to insert by index; cannot be greater than the number of columns
name : number or string
name or label of column to be inserted
value : Series or array-like
nan_as_null : bool, Default None
If ``None``/``True``, converts ``np.nan`` values to
``null`` values.
If ``False``, leaves ``np.nan`` values as is.
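Examples
--------
A minimal illustrative sketch (output elided, as display formatting
may vary):
>>> df = cudf.DataFrame({'a': [1, 2], 'c': [5, 6]})
>>> df.insert(1, 'b', [3, 4])
>>> df.columns  # doctest: +SKIP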
"""
return self._insert(
loc=loc,
name=name,
value=value,
nan_as_null=nan_as_null,
ignore_index=False,
)
@_cudf_nvtx_annotate
def _insert(self, loc, name, value, nan_as_null=None, ignore_index=True):
"""
Same as `insert`, with additional `ignore_index` param.
ignore_index : bool, default True
If True, there will be no index equality check & reindexing
happening.
If False, a reindexing operation is performed if
`value.index` is not equal to `self.index`.
"""
if name in self._data:
raise NameError(f"duplicated column name {name}")
num_cols = len(self._data)
if loc < 0:
loc += num_cols + 1
if not (0 <= loc <= num_cols):
raise ValueError(
f"insert location must be within range "
f"{-(num_cols + 1) * (num_cols > 0)}, "
f"{num_cols * (num_cols > 0)}"
)
# TODO: This check is currently necessary because
# _is_scalar_or_zero_d_array below will treat a length 1 pd.Categorical
# as a scalar and attempt to use column.full, which can't handle it.
# Maybe _is_scalar_or_zero_d_array should be changed, or maybe we just
# shouldn't support pd.Categorical at all, but those changes will at
# least require a deprecation cycle because we currently support
# inserting a pd.Categorical.
if isinstance(value, pd.Categorical):
value = cudf.core.column.categorical.pandas_categorical_as_column(
value
)
if _is_scalar_or_zero_d_array(value):
value = column.full(
len(self),
value,
"str" if libcudf.scalar._is_null_host_scalar(value) else None,
)
if len(self) == 0:
if isinstance(value, (pd.Series, Series)):
if not ignore_index:
self._index = as_index(value.index)
elif len(value) > 0:
self._index = RangeIndex(start=0, stop=len(value))
new_data = self._data.__class__()
if num_cols != 0:
for col_name in self._data:
new_data[col_name] = column.column_empty_like(
self._data[col_name],
masked=True,
newsize=len(value),
)
self._data = new_data
elif isinstance(value, (pd.Series, Series)):
value = Series(value, nan_as_null=nan_as_null)
if not ignore_index:
value = value._align_to_index(
self._index, how="right", sort=False
)
value = column.as_column(value, nan_as_null=nan_as_null)
self._data.insert(name, value, loc=loc)
@property # type:ignore
@_cudf_nvtx_annotate
def axes(self):
"""
Return a list representing the axes of the DataFrame.
DataFrame.axes returns a list of two elements:
element zero is the row index and element one is the columns.
Examples
--------
>>> import cudf
>>> cdf1 = cudf.DataFrame()
>>> cdf1["key"] = [0,0,1,1]
>>> cdf1["k2"] = [1,2,2,3]
>>> cdf1["val"] = [1,2,3,4]
>>> cdf1["temp"] = [-1,2,2,3]
>>> cdf1.axes
[RangeIndex(start=0, stop=4, step=1),
Index(['key', 'k2', 'val', 'temp'], dtype='object')]
"""
return [self._index, self._data.to_pandas_index()]
def diff(self, periods=1, axis=0):
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is element in previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference,
accepts negative values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
Only row-wise (0) shift is supported.
Returns
-------
DataFrame
First differences of the DataFrame.
Notes
-----
Diff currently only supports numeric dtype columns.
Examples
--------
>>> import cudf
>>> gdf = cudf.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> gdf
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> gdf.diff(periods=2)
a b c
0 <NA> <NA> <NA>
1 <NA> <NA> <NA>
2 2 1 8
3 2 2 12
4 2 3 16
5 2 5 20
"""
if not is_integer(periods):
if not (is_float(periods) and periods.is_integer()):
raise ValueError("periods must be an integer")
periods = int(periods)
axis = self._get_axis_from_axis_arg(axis)
if axis != 0:
raise NotImplementedError("Only axis=0 is supported.")
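# If the shift distance exceeds the frame length, every difference is
# undefined, so return an all-null frame with the original dtypes.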
if abs(periods) > len(self):
df = cudf.DataFrame._from_data(
{
name: column_empty(len(self), dtype=dtype, masked=True)
for name, dtype in zip(self._column_names, self.dtypes)
}
)
return df
return self - self.shift(periods=periods)
@_cudf_nvtx_annotate
def drop_duplicates(
self,
subset=None,
keep="first",
inplace=False,
ignore_index=False,
):
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time
indexes are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', ``False``}, default 'first'
Determines which duplicates (if any) to keep.
- 'first' : Drop duplicates except for the first occurrence.
- 'last' : Drop duplicates except for the last occurrence.
- ``False`` : Drop all duplicates.
inplace : bool, default ``False``
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default ``False``
If True, the resulting axis will be labeled 0, 1, ..., n - 1.
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider a dataset containing ramen ratings.
>>> import cudf
>>> df = cudf.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
""" # noqa: E501
outdf = super().drop_duplicates(
subset=subset,
keep=keep,
ignore_index=ignore_index,
)
return self._mimic_inplace(outdf, inplace=inplace)
@_cudf_nvtx_annotate
def pop(self, item):
"""Return a column and drop it from the DataFrame."""
popped = self[item]
del self[item]
return popped
@_cudf_nvtx_annotate
def rename(
self,
mapper=None,
index=None,
columns=None,
axis=0,
copy=True,
inplace=False,
level=None,
errors="ignore",
):
"""Alter column and index labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
``DataFrame.rename`` supports two calling conventions:
- ``(index=index_mapper, columns=columns_mapper, ...)``
- ``(mapper, axis={0/'index' or 1/'columns'}, ...)``
We highly recommend using keyword arguments to clarify your intent.
Parameters
----------
mapper : dict-like or function, default None
optional dict-like or functions transformations to apply to
the index/column values depending on selected ``axis``.
index : dict-like, default None
Optional dict-like transformations to apply to the index axis'
values. Does not support functions for axis 0 yet.
columns : dict-like or function, default None
optional dict-like or functions transformations to apply to
the columns axis' values.
axis : int, default 0
Axis to rename with mapper.
0 or 'index' for index
1 or 'columns' for columns
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
If True, perform the rename in place and return None;
otherwise return a new DataFrame.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified level.
errors : {'raise', 'ignore', 'warn'}, default 'ignore'
*Only 'ignore' supported*
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original
object.
- ``warn`` : prints last exceptions as warnings and
return original object.
Returns
-------
DataFrame
Notes
-----
Difference from pandas:
* Not supporting: level
Rename will not overwrite column names. If a list with duplicates is
passed, column names will be postfixed with a number.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df
A B
0 1 4
1 2 5
2 3 6
Rename columns using a mapping:
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: 10, 1: 20, 2: 30})
A B
10 1 4
20 2 5
30 3 6
"""
if errors != "ignore":
raise NotImplementedError(
"Only errors='ignore' is currently supported"
)
if mapper is None and index is None and columns is None:
return self.copy(deep=copy)
index = mapper if index is None and axis in (0, "index") else index
columns = (
mapper if columns is None and axis in (1, "columns") else columns
)
if index:
if (
any(type(item) == str for item in index.values())
and type(self.index) != cudf.StringIndex
):
raise NotImplementedError(
"Implicit conversion of index to "
"mixed type is not yet supported."
)
if level is not None and isinstance(self.index, MultiIndex):
out_index = self.index.copy(deep=copy)
out_index.get_level_values(level).to_frame().replace(
to_replace=list(index.keys()),
value=list(index.values()),
inplace=True,
)
out = DataFrame(index=out_index)
else:
to_replace = list(index.keys())
vals = list(index.values())
is_all_na = vals.count(None) == len(vals)
try:
index_data = {
name: col.find_and_replace(to_replace, vals, is_all_na)
for name, col in self.index._data.items()
}
except OverflowError:
index_data = self.index._data.copy(deep=True)
out = DataFrame(index=_index_from_data(index_data))
else:
out = DataFrame(index=self.index)
if columns:
out._data = self._data.rename_levels(mapper=columns, level=level)
else:
out._data = self._data.copy(deep=copy)
if inplace:
self._data = out._data
else:
return out.copy(deep=copy)
@_cudf_nvtx_annotate
def add_prefix(self, prefix):
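# Illustrative behaviour sketch: df.add_prefix("col_") relabels columns
# ['a', 'b'] as ['col_a', 'col_b']; the row index is left untouched.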
out = self.copy(deep=True)
out.columns = [
prefix + col_name for col_name in list(self._data.keys())
]
return out
@_cudf_nvtx_annotate
def add_suffix(self, suffix):
out = self.copy(deep=True)
out.columns = [
col_name + suffix for col_name in list(self._data.keys())
]
return out
@_cudf_nvtx_annotate
def agg(self, aggs, axis=None):
"""
Aggregate using one or more operations over the specified axis.
Parameters
----------
aggs : Iterable (set, list, string, tuple or dict)
Function to use for aggregating data. Accepted types are:
* string name, e.g. ``"sum"``
* list of functions, e.g. ``["sum", "min", "max"]``
* dict of axis labels specified operations per column,
e.g. ``{"a": "sum"}``
axis : not yet supported
Returns
-------
Aggregation Result : ``Series`` or ``DataFrame``
When ``DataFrame.agg`` is called with single agg,
``Series`` is returned.
When ``DataFrame.agg`` is called with several aggs,
``DataFrame`` is returned.
Notes
-----
Difference from pandas:
* Not supporting: ``axis``, ``*args``, ``**kwargs``
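Examples
--------
A short illustrative sketch (outputs elided since exact formatting
may vary):
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
>>> df.agg('sum')           # doctest: +SKIP
>>> df.agg(['sum', 'min'])  # doctest: +SKIP
>>> df.agg({'a': 'max'})    # doctest: +SKIP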
"""
# TODO: Remove the typecasting below once issue #6846 is fixed
# link <https://github.com/rapidsai/cudf/issues/6846>
dtypes = [self[col].dtype for col in self._column_names]
common_dtype = find_common_type(dtypes)
df_normalized = self.astype(common_dtype)
if any(is_string_dtype(dt) for dt in dtypes):
raise NotImplementedError(
"DataFrame.agg() is not supported for "
"frames containing string columns"
)
if axis == 0 or axis is not None:
raise NotImplementedError("axis not implemented yet")
if isinstance(aggs, abc.Iterable) and not isinstance(
aggs, (str, dict)
):
result = DataFrame()
# TODO : Allow simultaneous pass for multi-aggregation as
# a future optimization
for agg in aggs:
result[agg] = getattr(df_normalized, agg)()
return result.T.sort_index(axis=1, ascending=True)
elif isinstance(aggs, str):
if not hasattr(df_normalized, aggs):
raise AttributeError(
f"{aggs} is not a valid function for "
f"'DataFrame' object"
)
result = DataFrame()
result[aggs] = getattr(df_normalized, aggs)()
result = result.iloc[:, 0]
result.name = None
return result
elif isinstance(aggs, dict):
cols = aggs.keys()
if any(callable(val) for val in aggs.values()):
raise NotImplementedError(
"callable parameter is not implemented yet"
)
elif all(isinstance(val, str) for val in aggs.values()):
result = cudf.Series(index=cols)
for key, value in aggs.items():
col = df_normalized[key]
if not hasattr(col, value):
raise AttributeError(
f"{value} is not a valid function for "
f"'Series' object"
)
result[key] = getattr(col, value)()
elif all(isinstance(val, abc.Iterable) for val in aggs.values()):
idxs = set()
for val in aggs.values():
if isinstance(val, str):
idxs.add(val)
elif isinstance(val, abc.Iterable):
idxs.update(val)
idxs = sorted(list(idxs))
for agg in idxs:
if callable(agg):
raise NotImplementedError(
"callable parameter is not implemented yet"
)
result = DataFrame(index=idxs, columns=cols)
for key in aggs.keys():
col = df_normalized[key]
col_empty = column_empty(
len(idxs), dtype=col.dtype, masked=True
)
ans = cudf.Series(data=col_empty, index=idxs)
if isinstance(aggs.get(key), abc.Iterable):
# TODO : Allow simultaneous pass for multi-aggregation
# as a future optimization
for agg in aggs.get(key):
if not hasattr(col, agg):
raise AttributeError(
f"{agg} is not a valid function for "
f"'Series' object"
)
ans[agg] = getattr(col, agg)()
elif isinstance(aggs.get(key), str):
if not hasattr(col, aggs.get(key)):
raise AttributeError(
f"{aggs.get(key)} is not a valid function for "
f"'Series' object"
)
ans[aggs.get(key)] = getattr(col, aggs.get(key))()
result[key] = ans
else:
raise ValueError("values of dict must be a string or list")
return result
elif callable(aggs):
raise NotImplementedError(
"callable parameter is not implemented yet"
)
else:
raise ValueError("argument must be a string, list or dict")
@_cudf_nvtx_annotate
def nlargest(self, n, columns, keep="first"):
"""Return the first *n* rows ordered by *columns* in descending order.
Return the first *n* rows with the largest values in *columns*, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
Notes
-----
Difference from pandas:
- Only a single column is supported in *columns*
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return self._n_largest_or_smallest(True, n, columns, keep)
def nsmallest(self, n, columns, keep="first"):
"""Return the first *n* rows ordered by *columns* in ascending order.
Return the first *n* rows with the smallest values in *columns*, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
Returns
-------
DataFrame
Notes
-----
Difference from pandas:
- Only a single column is supported in *columns*
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
"""
return self._n_largest_or_smallest(False, n, columns, keep)
@_cudf_nvtx_annotate
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int or str, default -2
First level of index to be swapped.
j : int or str, default -1
Second level of index to be swapped.
axis : The axis to swap levels on.
0 or 'index' for row-wise, 1 or 'columns' for column-wise.
Examples
--------
>>> import cudf
>>> midx = cudf.MultiIndex(levels=[['llama', 'cow', 'falcon'],
... ['speed', 'weight', 'length'],['first','second']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2],
... [0, 0, 0, 0, 0, 0, 1, 1, 1]])
>>> cdf = cudf.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250], [1, 0.8], [0.3, 0.2]])
>>> cdf
big small
llama speed first 45.0 30.0
weight first 200.0 100.0
length first 1.5 1.0
cow speed first 30.0 20.0
weight first 250.0 150.0
length first 1.5 0.8
falcon speed second 320.0 250.0
weight second 1.0 0.8
length second 0.3 0.2
>>> cdf.swaplevel()
big small
llama first speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow first speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon second speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
"""
result = self.copy()
# To get axis number
axis = self._get_axis_from_axis_arg(axis)
if axis == 0:
if not isinstance(result.index, MultiIndex):
raise TypeError("Can only swap levels on a hierarchical axis.")
result.index = result.index.swaplevel(i, j)
else:
if not result._data.multiindex:
raise TypeError("Can only swap levels on a hierarchical axis.")
result._data = result._data.swaplevel(i, j)
return result
@_cudf_nvtx_annotate
def transpose(self):
"""Transpose index and columns.
Returns
-------
a new (ncol x nrow) dataframe. self is (nrow x ncol)
Notes
-----
Difference from pandas:
Not supporting *copy* because default and only behavior is copy=True
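Examples
--------
Illustrative only (output elided):
>>> df = cudf.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df.transpose()  # doctest: +SKIP
>>> df.T            # doctest: +SKIP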
"""
index = self._data.to_pandas_index()
columns = self.index.copy(deep=False)
if self._num_columns == 0 or self._num_rows == 0:
return DataFrame(index=index, columns=columns)
# No column from index is transposed with libcudf.
source_columns = [*self._columns]
source_dtype = source_columns[0].dtype
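# Categorical columns are transposed via their integer codes: unify all
# columns onto a single category set first, then restore the categorical
# dtype on the transposed result below.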
if is_categorical_dtype(source_dtype):
if any(not is_categorical_dtype(c.dtype) for c in source_columns):
raise ValueError("Columns must all have the same dtype")
cats = list(c.categories for c in source_columns)
cats = cudf.core.column.concat_columns(cats).unique()
source_columns = [
col._set_categories(cats, is_unique=True).codes
for col in source_columns
]
if any(c.dtype != source_columns[0].dtype for c in source_columns):
raise ValueError("Columns must all have the same dtype")
result_columns = libcudf.transpose.transpose(source_columns)
if is_categorical_dtype(source_dtype):
result_columns = [
codes._with_type_metadata(
cudf.core.dtypes.CategoricalDtype(categories=cats)
)
for codes in result_columns
]
else:
result_columns = [
result_column._with_type_metadata(source_dtype)
for result_column in result_columns
]
# Set the old column names as the new index
result = self.__class__._from_data(
{i: col for i, col in enumerate(result_columns)},
index=as_index(index),
)
# Set the old index as the new column names
result.columns = columns
return result
T = property(transpose, doc=transpose.__doc__)
@_cudf_nvtx_annotate
def melt(self, **kwargs):
"""Unpivots a DataFrame from wide format to long format,
optionally leaving identifier variables set.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
default: None
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot.
default: all columns that are not set as `id_vars`.
var_name : scalar
Name to use for the `variable` column.
default: frame.columns.name or 'variable'
value_name : str
Name to use for the `value` column.
default: 'value'
Returns
-------
out : DataFrame
Melted result
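Examples
--------
A minimal illustrative sketch (output elided; see :func:`cudf.melt`):
>>> df = cudf.DataFrame({'A': ['a', 'b'], 'B': [1, 2], 'C': [3, 4]})
>>> df.melt(id_vars=['A'], value_vars=['B', 'C'])  # doctest: +SKIP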
"""
from cudf.core.reshape import melt
return melt(self, **kwargs)
@_cudf_nvtx_annotate
def merge(
self,
right,
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
how="inner",
sort=False,
lsuffix=None,
rsuffix=None,
indicator=False,
suffixes=("_x", "_y"),
):
"""Merge GPU DataFrame objects by performing a database-style join
operation by columns or indexes.
Parameters
----------
right : DataFrame
on : label or list; defaults to None
Column or index level names to join on. These must be found in
both DataFrames.
If on is None and not merging on indexes then
this defaults to the intersection of the columns
in both DataFrames.
how : {'left', 'outer', 'inner', 'leftsemi', 'leftanti'}, \
default 'inner'
Type of merge to be performed.
- left : use only keys from left frame, similar to a SQL left
outer join.
- right : not supported.
- outer : use union of keys from both frames, similar to a SQL
full outer join.
- inner : use intersection of keys from both frames, similar to
a SQL inner join.
- leftsemi : similar to ``inner`` join, but only returns columns
from the left dataframe and ignores all columns from the
right dataframe.
- leftanti : returns only the rows and columns from the left
dataframe for records with no match in the right dataframe. This is
the exact opposite of the ``leftsemi`` join.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame.
Can also be an array or list of arrays of the length of the
left DataFrame. These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame.
Can also be an array or list of arrays of the length of the
right DataFrame. These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s).
right_index : bool, default False
Use the index from the right DataFrame as the join key.
sort : bool, default False
Sort the resulting dataframe by the columns that were merged on,
starting from the left.
suffixes: Tuple[str, str], defaults to ('_x', '_y')
Suffixes applied to overlapping column names on the left and right
sides
Returns
-------
merged : DataFrame
Notes
-----
**DataFrame merges in cuDF result in non-deterministic row ordering.**
Examples
--------
>>> import cudf
>>> df_a = cudf.DataFrame()
>>> df_a['key'] = [0, 1, 2, 3, 4]
>>> df_a['vals_a'] = [float(i + 10) for i in range(5)]
>>> df_b = cudf.DataFrame()
>>> df_b['key'] = [1, 2, 4]
>>> df_b['vals_b'] = [float(i+10) for i in range(3)]
>>> df_merged = df_a.merge(df_b, on=['key'], how='left')
>>> df_merged.sort_values('key') # doctest: +SKIP
key vals_a vals_b
3 0 10.0
0 1 11.0 10.0
1 2 12.0 11.0
4 3 13.0
2 4 14.0 12.0
**Merging on categorical variables is only allowed in certain cases**
Categorical variable typecasting logic depends on both `how`
and the specifics of the categorical variables to be merged.
Merging categorical variables when only one side is ordered
is ambiguous and not allowed. Merging when both categoricals
are ordered is allowed, but only when the categories are
exactly equal and have equal ordering, and will result in the
common dtype.
When both sides are unordered, the result categorical depends
on the kind of join:
- For inner joins, the result will be the intersection of the
categories
- For left or right joins, the result will be the left or
right dtype respectively. This extends to semi and anti joins.
- For outer joins, the result will be the union of categories
from both sides.
"""
if indicator:
raise NotImplementedError(
"Only indicator=False is currently supported"
)
if lsuffix or rsuffix:
raise ValueError(
"The lsuffix and rsuffix keywords have been replaced with the "
"``suffixes=`` keyword. "
"Please provide the following instead: \n\n"
" suffixes=('%s', '%s')"
% (lsuffix or "_x", rsuffix or "_y")
)
else:
lsuffix, rsuffix = suffixes
lhs, rhs = self, right
merge_cls = Merge
if how == "right":
# Merge doesn't support right, so just swap
how = "left"
lhs, rhs = right, self
left_on, right_on = right_on, left_on
left_index, right_index = right_index, left_index
suffixes = (suffixes[1], suffixes[0])
elif how in {"leftsemi", "leftanti"}:
merge_cls = MergeSemi
return merge_cls(
lhs,
rhs,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
how=how,
sort=sort,
indicator=indicator,
suffixes=suffixes,
).perform_merge()
@_cudf_nvtx_annotate
def join(
self,
other,
on=None,
how="left",
lsuffix="",
rsuffix="",
sort=False,
):
"""Join columns with other DataFrame on index or on a key column.
Parameters
----------
other : DataFrame
how : str
Only accepts "left", "right", "inner", "outer"
lsuffix, rsuffix : str
The suffixes to add to the left (*lsuffix*) and right (*rsuffix*)
column names when avoiding conflicts.
sort : bool
Set to True to ensure sorted ordering.
Returns
-------
joined : DataFrame
Notes
-----
Difference from pandas:
- *other* must be a single DataFrame for now.
- *on* is not supported yet due to lack of multi-index support.
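Examples
--------
A minimal index-aligned join sketch (output elided):
>>> left = cudf.DataFrame({'a': [1, 2]}, index=['x', 'y'])
>>> right = cudf.DataFrame({'b': [3, 4]}, index=['y', 'z'])
>>> left.join(right, how='outer')  # doctest: +SKIP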
"""
if on is not None:
raise NotImplementedError("The on parameter is not yet supported")
df = self.merge(
other,
left_index=True,
right_index=True,
how=how,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
df.index.name = (
None if self.index.name != other.index.name else self.index.name
)
return df
@_cudf_nvtx_annotate
@docutils.doc_apply(
groupby_doc_template.format(
ret=textwrap.dedent(
"""
Returns
-------
DataFrameGroupBy
Returns a DataFrameGroupBy object that contains
information about the groups.
"""
)
)
)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=no_default,
group_keys=False,
squeeze=False,
observed=True,
dropna=True,
):
return super().groupby(
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
observed,
dropna,
)
def query(self, expr, local_dict=None):
"""
Query with a boolean expression using Numba to compile a GPU kernel.
See pandas.DataFrame.query.
Parameters
----------
expr : str
A boolean expression. Names in expression refer to columns.
`index` can be used instead of index name, but this is not
supported for MultiIndex.
Names starting with `@` refer to Python variables.
An output value will be `null` if any of the input values are
`null` regardless of expression.
local_dict : dict
Containing the local variable to be used in query.
Returns
-------
filtered : DataFrame
Examples
--------
>>> df = cudf.DataFrame({
... "a": [1, 2, 2],
... "b": [3, 4, 5],
... })
>>> expr = "(a == 2 and b == 4) or (b == 3)"
>>> df.query(expr)
a b
0 1 3
1 2 4
DateTime conditionals:
>>> import numpy as np
>>> import datetime
>>> df = cudf.DataFrame()
>>> data = np.array(['2018-10-07', '2018-10-08'], dtype='datetime64')
>>> df['datetimes'] = data
>>> search_date = datetime.datetime.strptime('2018-10-08', '%Y-%m-%d')
>>> df.query('datetimes==@search_date')
datetimes
1 2018-10-08
Using local_dict:
>>> import numpy as np
>>> import datetime
>>> df = cudf.DataFrame()
>>> data = np.array(['2018-10-07', '2018-10-08'], dtype='datetime64')
>>> df['datetimes'] = data
>>> search_date2 = datetime.datetime.strptime('2018-10-08', '%Y-%m-%d')
>>> df.query('datetimes==@search_date',
... local_dict={'search_date': search_date2})
datetimes
1 2018-10-08
.. pandas-compat::
**DataFrame.query**
One difference from pandas is that ``query`` currently only
supports numeric, datetime, timedelta, or bool dtypes.
"""
# can't use `annotate` decorator here as we inspect the calling
# environment.
with annotate("DATAFRAME_QUERY", color="purple", domain="cudf_python"):
if local_dict is None:
local_dict = {}
if self.empty:
return self.copy()
if not isinstance(local_dict, dict):
raise TypeError(
f"local_dict type: expected dict but found "
f"{type(local_dict)}"
)
# Get calling environment
callframe = inspect.currentframe().f_back
callenv = {
"locals": callframe.f_locals,
"globals": callframe.f_globals,
"local_dict": local_dict,
}
# Run query
boolmask = queryutils.query_execute(self, expr, callenv)
return self._apply_boolean_mask(
BooleanMask.from_column_unchecked(boolmask)
)
@_cudf_nvtx_annotate
def apply(
self, func, axis=1, raw=False, result_type=None, args=(), **kwargs
):
"""
Apply a function along an axis of the DataFrame.
``apply`` relies on Numba to JIT compile ``func``.
Thus the allowed operations within ``func`` are limited to `those
supported by the CUDA Python Numba target
<https://numba.readthedocs.io/en/stable/cuda/cudapysupported.html>`__.
For more information, see the `cuDF guide to user defined functions
<https://docs.rapids.ai/api/cudf/stable/user_guide/guide-to-udfs.html>`__.
Some string functions and methods are supported. Refer to the guide
to UDFs for details.
Parameters
----------
func : function
Function to apply to each row.
axis : {0 or 'index', 1 or 'columns'}, default 1
Axis along which the function is applied.
- 0 or 'index': apply function to each column (not yet supported).
- 1 or 'columns': apply function to each row.
raw: bool, default False
Not yet supported
result_type: {'expand', 'reduce', 'broadcast', None}, default None
Not yet supported
args: tuple
Positional arguments to pass to func in addition to the dataframe.
Examples
--------
Simple function of a single variable which could be NA:
>>> def f(row):
... if row['a'] is cudf.NA:
... return 0
... else:
... return row['a'] + 1
...
>>> df = cudf.DataFrame({'a': [1, cudf.NA, 3]})
>>> df.apply(f, axis=1)
0 2
1 0
2 4
dtype: int64
Function of multiple variables will operate in
a null aware manner:
>>> def f(row):
... return row['a'] - row['b']
...
>>> df = cudf.DataFrame({
... 'a': [1, cudf.NA, 3, cudf.NA],
... 'b': [5, 6, cudf.NA, cudf.NA]
... })
>>> df.apply(f)
0 -4
1 <NA>
2 <NA>
3 <NA>
dtype: int64
Functions may conditionally return NA as in pandas:
>>> def f(row):
... if row['a'] + row['b'] > 3:
... return cudf.NA
... else:
... return row['a'] + row['b']
...
>>> df = cudf.DataFrame({
... 'a': [1, 2, 3],
... 'b': [2, 1, 1]
... })
>>> df.apply(f, axis=1)
0 3
1 3
2 <NA>
dtype: int64
Mixed types are allowed, but will return the common
type, rather than object as in pandas:
>>> def f(row):
... return row['a'] + row['b']
...
>>> df = cudf.DataFrame({
... 'a': [1, 2, 3],
... 'b': [0.5, cudf.NA, 3.14]
... })
>>> df.apply(f, axis=1)
0 1.5
1 <NA>
2 6.14
dtype: float64
Functions may also return scalar values, however the
result will be promoted to a safe type regardless of
the data:
>>> def f(row):
... if row['a'] > 3:
... return row['a']
... else:
... return 1.5
...
>>> df = cudf.DataFrame({
... 'a': [1, 3, 5]
... })
>>> df.apply(f, axis=1)
0 1.5
1 1.5
2 5.0
dtype: float64
Ops against N columns are supported generally:
>>> def f(row):
... v, w, x, y, z = (
... row['a'], row['b'], row['c'], row['d'], row['e']
... )
... return x + (y - (z / w)) % v
...
>>> df = cudf.DataFrame({
... 'a': [1, 2, 3],
... 'b': [4, 5, 6],
... 'c': [cudf.NA, 4, 4],
... 'd': [8, 7, 8],
... 'e': [7, 1, 6]
... })
>>> df.apply(f, axis=1)
0 <NA>
1 4.8
2 5.0
dtype: float64
UDFs manipulating string data are allowed, as long as
they neither modify strings in place nor create new strings.
For example, the following UDF is allowed:
>>> def f(row):
... st = row['str_col']
... scale = row['scale']
... if len(st) == 0:
... return -1
... elif st.startswith('a'):
... return 1 - scale
... elif 'example' in st:
... return 1 + scale
... else:
... return 42
...
>>> df = cudf.DataFrame({
... 'str_col': ['', 'abc', 'some_example'],
... 'scale': [1, 2, 3]
... })
>>> df.apply(f, axis=1) # doctest: +SKIP
0 -1
1 -1
2 4
dtype: int64
However, the following UDF is not allowed since it includes an
operation that requires the creation of a new string: a call to the
``upper`` method. Methods that are not supported in this manner
will raise an ``AttributeError``.
>>> def f(row):
... st = row['str_col'].upper()
... return 'ABC' in st
>>> df.apply(f, axis=1) # doctest: +SKIP
For a complete list of supported functions and methods that may be
used to manipulate string data, see the UDF guide,
<https://docs.rapids.ai/api/cudf/stable/user_guide/guide-to-udfs.html>
"""
if axis != 1:
raise ValueError(
"DataFrame.apply currently only supports row wise ops"
)
if raw:
raise ValueError("The `raw` kwarg is not yet supported.")
if result_type is not None:
raise ValueError("The `result_type` kwarg is not yet supported.")
return self._apply(func, _get_row_kernel, *args, **kwargs)
def applymap(
self,
func: Callable[[Any], Any],
na_action: Union[str, None] = None,
**kwargs,
) -> DataFrame:
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
na_action : {None, 'ignore'}, default None
If 'ignore', propagate NaN values, without passing them to func.
Returns
-------
DataFrame
Transformed DataFrame.
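Examples
--------
Illustrative sketch (element-wise transform; output elided):
>>> df = cudf.DataFrame({'a': [1, 2, 3]})
>>> df.applymap(lambda x: x ** 2)  # doctest: +SKIP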
"""
if kwargs:
raise NotImplementedError(
"DataFrame.applymap does not yet support **kwargs."
)
if na_action not in {"ignore", None}:
raise ValueError(
f"na_action must be 'ignore' or None. Got {repr(na_action)}"
)
if na_action == "ignore":
devfunc = numba.cuda.jit(device=True)(func)
# promote to a null-ignoring function
# this code is never run in python, it only
# exists to provide numba with the correct
# bytecode to generate the equivalent PTX
# as a null-ignoring version of the function
def _func(x): # pragma: no cover
if x is NA:
return NA
else:
return devfunc(x)
else:
_func = func
# TODO: naive implementation
# this could be written as a single kernel
result = {}
for name, col in self._data.items():
apply_sr = Series._from_data({None: col})
result[name] = apply_sr.apply(_func)
return DataFrame._from_data(result, index=self.index)
@_cudf_nvtx_annotate
@applyutils.doc_apply()
def apply_rows(
self,
func,
incols,
outcols,
kwargs,
pessimistic_nulls=True,
cache_key=None,
):
"""
Apply a row-wise user defined function.
Parameters
----------
{params}
Examples
--------
The user function should loop over the columns and set the output for
each row. Loop execution order is arbitrary, so each iteration of
the loop **MUST** be independent of each other.
When ``func`` is invoked, the array args corresponding to the
input/output are strided so as to improve GPU parallelism.
The loop in the function resembles serial code, but executes
concurrently in multiple threads.
>>> import cudf
>>> import numpy as np
>>> df = cudf.DataFrame()
>>> nelem = 3
>>> df['in1'] = np.arange(nelem)
>>> df['in2'] = np.arange(nelem)
>>> df['in3'] = np.arange(nelem)
Define input columns for the kernel
>>> in1 = df['in1']
>>> in2 = df['in2']
>>> in3 = df['in3']
>>> def kernel(in1, in2, in3, out1, out2, kwarg1, kwarg2):
... for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
... out1[i] = kwarg2 * x - kwarg1 * y
... out2[i] = y - kwarg1 * z
Call ``.apply_rows`` with the name of the input columns, the name and
dtype of the output columns, and, optionally, a dict of extra
arguments.
>>> df.apply_rows(kernel,
... incols=['in1', 'in2', 'in3'],
... outcols=dict(out1=np.float64, out2=np.float64),
... kwargs=dict(kwarg1=3, kwarg2=4))
in1 in2 in3 out1 out2
0 0 0 0 0.0 0.0
1 1 1 1 1.0 -2.0
2 2 2 2 2.0 -4.0
"""
for col in incols:
current_col_dtype = self._data[col].dtype
if is_string_dtype(current_col_dtype) or is_categorical_dtype(
current_col_dtype
):
raise TypeError(
"User defined functions are currently not "
"supported on Series with dtypes `str` and `category`."
)
return applyutils.apply_rows(
self,
func,
incols,
outcols,
kwargs,
pessimistic_nulls,
cache_key=cache_key,
)
@_cudf_nvtx_annotate
@applyutils.doc_applychunks()
def apply_chunks(
self,
func,
incols,
outcols,
kwargs=None,
pessimistic_nulls=True,
chunks=None,
blkct=None,
tpb=None,
):
"""
Transform user-specified chunks using the user-provided function.
Parameters
----------
{params}
{params_chunks}
Examples
--------
For ``tpb > 1``, ``func`` is executed by ``tpb`` number of threads
concurrently. To access the thread id and count,
use ``numba.cuda.threadIdx.x`` and ``numba.cuda.blockDim.x``,
respectively (See `numba CUDA kernel documentation`_).
.. _numba CUDA kernel documentation:\
https://numba.readthedocs.io/en/stable/cuda/kernels.html
In the example below, the *kernel* is invoked concurrently on each
specified chunk. The *kernel* computes the corresponding output
for the chunk.
By looping over the range
``range(cuda.threadIdx.x, in1.size, cuda.blockDim.x)``, the *kernel*
function can be used with any *tpb* in an efficient manner.
>>> from numba import cuda
>>> @cuda.jit
... def kernel(in1, in2, in3, out1):
... for i in range(cuda.threadIdx.x, in1.size, cuda.blockDim.x):
... x = in1[i]
... y = in2[i]
... z = in3[i]
... out1[i] = x * y + z
See also
--------
DataFrame.apply_rows
"""
if kwargs is None:
kwargs = {}
if chunks is None:
raise ValueError("*chunks* must be defined")
return applyutils.apply_chunks(
self,
func,
incols,
outcols,
kwargs,
pessimistic_nulls,
chunks,
tpb=tpb,
)
@_cudf_nvtx_annotate
def partition_by_hash(self, columns, nparts, keep_index=True):
"""Partition the dataframe by the hashed value of data in *columns*.
Parameters
----------
columns : sequence of str
The names of the columns to be hashed.
Must have at least one name.
nparts : int
Number of output partitions
keep_index : boolean
Whether to keep the index or drop it
Returns
-------
partitioned: list of DataFrame
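Examples
--------
Illustrative sketch: hash the 'key' column into two partitions. The
row-to-partition assignment is implementation defined, but exactly
``nparts`` DataFrames are always returned:
>>> df = cudf.DataFrame({'key': [0, 1, 2, 3], 'val': [10, 20, 30, 40]})
>>> parts = df.partition_by_hash(['key'], nparts=2)  # doctest: +SKIP
>>> len(parts)  # doctest: +SKIP
2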
"""
key_indices = [self._column_names.index(k) for k in columns]
if keep_index:
cols = [*self._index._columns, *self._columns]
key_indices = [i + len(self._index._columns) for i in key_indices]
else:
cols = [*self._columns]
output_columns, offsets = libcudf.hash.hash_partition(
cols, key_indices, nparts
)
outdf = self._from_columns_like_self(
output_columns,
self._column_names,
self._index_names if keep_index else None,
)
# Slice into partitions. Notice, `hash_partition` returns the start
# offset of each partition thus we skip the first offset
ret = outdf._split(offsets[1:], keep_index=keep_index)
# Calling `_split()` on an empty dataframe returns an empty list
# so we add empty partitions here
ret += [self._empty_like(keep_index) for _ in range(nparts - len(ret))]
return ret
def info(
self,
verbose=None,
buf=None,
max_cols=None,
memory_usage=None,
null_counts=None,
):
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always show memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based in column dtype and number of rows assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> import cudf
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = cudf.DataFrame({"int_col": int_values,
... "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False)
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
Pipe output of DataFrame.info to a buffer instead of sys.stdout and
print buffer contents:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> print(buffer.getvalue())
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 130.0+ bytes
The `memory_usage` parameter allows deep introspection mode, especially
useful for big DataFrames and for fine-tuning memory optimization:
>>> import numpy as np
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = cudf.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info(memory_usage='deep')
<class 'cudf.core.dataframe.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 14.3 MB
"""
if buf is None:
buf = sys.stdout
lines = [str(type(self))]
index_name = type(self._index).__name__
if len(self._index) > 0:
entries_summary = f", {self._index[0]} to {self._index[-1]}"
else:
entries_summary = ""
index_summary = (
f"{index_name}: {len(self._index)} entries{entries_summary}"
)
lines.append(index_summary)
if len(self._data) == 0:
lines.append(f"Empty {type(self).__name__}")
cudf.utils.ioutils.buffer_write_lines(buf, lines)
return
cols = self._column_names
col_count = len(cols)
if max_cols is None:
max_cols = pd.options.display.max_info_columns
max_rows = pd.options.display.max_info_rows
if null_counts is None:
show_counts = (col_count <= max_cols) and (len(self) < max_rows)
else:
show_counts = null_counts
exceeds_info_cols = col_count > max_cols
def _put_str(s, space):
return str(s)[:space].ljust(space)
def _verbose_repr():
lines.append(f"Data columns (total {col_count} columns):")
id_head = " # "
column_head = "Column"
col_space = 2
max_col = max(len(pprint_thing(k)) for k in cols)
len_column = len(pprint_thing(column_head))
space = max(max_col, len_column) + col_space
max_id = len(pprint_thing(col_count))
len_id = len(pprint_thing(id_head))
space_num = max(max_id, len_id) + col_space
counts = None
header = _put_str(id_head, space_num) + _put_str(
column_head, space
)
if show_counts:
counts = self.count().to_pandas().tolist()
if col_count != len(counts):
raise AssertionError(
f"Columns must equal "
f"counts ({col_count} != {len(counts)})"
)
count_header = "Non-Null Count"
len_count = len(count_header)
non_null = " non-null"
max_count = max(len(pprint_thing(k)) for k in counts) + len(
non_null
)
space_count = max(len_count, max_count) + col_space
count_temp = "{count}" + non_null
else:
count_header = ""
space_count = len(count_header)
len_count = space_count
count_temp = "{count}"
dtype_header = "Dtype"
len_dtype = len(dtype_header)
max_dtypes = max(len(pprint_thing(k)) for k in self.dtypes)
space_dtype = max(len_dtype, max_dtypes)
header += (
_put_str(count_header, space_count)
+ _put_str(dtype_header, space_dtype).rstrip()
)
lines.append(header)
lines.append(
_put_str("-" * len_id, space_num)
+ _put_str("-" * len_column, space)
+ _put_str("-" * len_count, space_count)
+ _put_str("-" * len_dtype, space_dtype).rstrip()
)
for i, col in enumerate(self._column_names):
dtype = self.dtypes.iloc[i]
col = pprint_thing(col)
line_no = _put_str(f" {i}", space_num)
count = ""
if show_counts:
count = counts[i]
lines.append(
line_no
+ _put_str(col, space)
+ _put_str(count_temp.format(count=count), space_count)
+ _put_str(dtype, space_dtype).rstrip()
)
def _non_verbose_repr():
if col_count > 0:
entries_summary = f", {cols[0]} to {cols[-1]}"
else:
entries_summary = ""
columns_summary = f"Columns: {col_count} entries{entries_summary}"
lines.append(columns_summary)
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f}{size_qualifier} {x}"
num /= 1024.0
return f"{num:3.1f}{size_qualifier} PB"
if verbose:
_verbose_repr()
elif verbose is False:  # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
dtype_counts = defaultdict(int)
for col in self._data:
dtype_counts[self._data[col].dtype.name] += 1
dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(dtype_counts.items())]
lines.append(f"dtypes: {', '.join(dtypes)}")
if memory_usage is None:
memory_usage = pd.options.display.memory_usage
if memory_usage:
# append memory usage of df to display
size_qualifier = ""
if memory_usage == "deep":
deep = True
else:
deep = False
if "object" in dtype_counts or self.index.dtype == "object":
size_qualifier = "+"
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append(
f"memory usage: {_sizeof_fmt(mem_usage, size_qualifier)}\n"
)
cudf.utils.ioutils.buffer_write_lines(buf, lines)
@_cudf_nvtx_annotate
@docutils.doc_describe()
def describe(
self,
percentiles=None,
include=None,
exclude=None,
datetime_is_numeric=False,
):
"""{docstring}"""
if not include and not exclude:
default_include = [np.number]
if datetime_is_numeric:
default_include.append("datetime")
else:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
"`datetime_is_numeric` is deprecated. Specify "
"`datetime_is_numeric=True` to silence this "
"warning and adopt the future behavior now.",
FutureWarning,
)
data_to_describe = self.select_dtypes(include=default_include)
if data_to_describe._num_columns == 0:
data_to_describe = self
elif include == "all":
if exclude is not None:
raise ValueError("exclude must be None when include is 'all'")
data_to_describe = self
else:
data_to_describe = self.select_dtypes(
include=include, exclude=exclude
)
if data_to_describe.empty:
raise ValueError("No data of included types.")
describe_series_list = [
data_to_describe[col].describe(
percentiles=percentiles,
datetime_is_numeric=datetime_is_numeric,
)
for col in data_to_describe._column_names
]
if len(describe_series_list) == 1:
return describe_series_list[0].to_frame()
else:
ldesc_indexes = sorted(
(x.index for x in describe_series_list), key=len
)
names = dict.fromkeys(
[
name
for idxnames in ldesc_indexes
for name in idxnames.to_pandas()
],
None,
)
return cudf.concat(
[
series.reindex(names, copy=False)
for series in describe_series_list
],
axis=1,
sort=False,
)
@_cudf_nvtx_annotate
def to_pandas(self, nullable=False, **kwargs):
"""
Convert to a Pandas DataFrame.
Parameters
----------
nullable : Boolean, Default False
If ``nullable`` is ``True``, the resulting columns
in the dataframe will have a corresponding
nullable Pandas dtype. If there is no corresponding
nullable Pandas dtype present, the resulting dtype
will be a regular pandas dtype.
If ``nullable`` is ``False``,
the resulting columns will either convert null
values to ``np.nan`` or ``None`` depending on the dtype.
Returns
-------
out : Pandas DataFrame
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [0, 1, 2], 'b': [-3, 2, 0]})
>>> pdf = df.to_pandas()
>>> pdf
a b
0 0 -3
1 1 2
2 2 0
>>> type(pdf)
<class 'pandas.core.frame.DataFrame'>
``nullable`` parameter can be used to control
whether dtype can be Pandas Nullable or not:
>>> df = cudf.DataFrame({'a': [0, None, 2], 'b': [True, False, None]})
>>> df
a b
0 0 True
1 <NA> False
2 2 <NA>
>>> pdf = df.to_pandas(nullable=True)
>>> pdf
a b
0 0 True
1 <NA> False
2 2 <NA>
>>> pdf.dtypes
a Int64
b boolean
dtype: object
>>> pdf = df.to_pandas(nullable=False)
>>> pdf
a b
0 0.0 True
1 NaN False
2 2.0 None
>>> pdf.dtypes
a float64
b object
dtype: object
"""
out_data = {}
out_index = self.index.to_pandas()
for i, col_key in enumerate(self._data):
out_data[i] = self._data[col_key].to_pandas(
index=out_index, nullable=nullable
)
out_df = pd.DataFrame(out_data, index=out_index)
out_df.columns = self._data.to_pandas_index()
return out_df
@classmethod
@_cudf_nvtx_annotate
def from_pandas(cls, dataframe, nan_as_null=no_default):
"""
Convert from a Pandas DataFrame.
Parameters
----------
dataframe : Pandas DataFrame object
A Pandas DataFrame object which has to be converted
to cuDF DataFrame.
nan_as_null : bool, Default True
If ``True``, converts ``np.nan`` values to ``null`` values.
If ``False``, leaves ``np.nan`` values as is.
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> data = [[0,1], [1,2], [3,4]]
>>> pdf = pd.DataFrame(data, columns=['a', 'b'], dtype=int)
>>> cudf.from_pandas(pdf)
a b
0 0 1
1 1 2
2 3 4
"""
if nan_as_null is no_default:
nan_as_null = (
False if cudf.get_option("mode.pandas_compatible") else None
)
if isinstance(dataframe, pd.DataFrame):
if not dataframe.columns.is_unique:
raise ValueError("Duplicate column names are not allowed")
# Set columns
data = {}
for col_name, col_value in dataframe.items():
# necessary because multi-index can return multiple
# columns for a single key
if len(col_value.shape) == 1:
data[col_name] = column.as_column(
col_value.array, nan_as_null=nan_as_null
)
else:
vals = col_value.values.T
if vals.shape[0] == 1:
data[col_name] = column.as_column(
vals.flatten(), nan_as_null=nan_as_null
)
else:
if isinstance(col_name, tuple):
col_name = str(col_name)
for idx in range(len(vals.shape)):
data[col_name] = column.as_column(
vals[idx], nan_as_null=nan_as_null
)
index = cudf.from_pandas(dataframe.index, nan_as_null=nan_as_null)
df = cls._from_data(data, index)
df._data._level_names = tuple(dataframe.columns.names)
if isinstance(dataframe.columns, pd.RangeIndex):
df._data.rangeindex = True
# Set columns only if it is a MultiIndex
elif isinstance(dataframe.columns, pd.MultiIndex):
df.columns = dataframe.columns
return df
else:
try:
return from_dataframe(dataframe, allow_copy=True)
except Exception:
raise TypeError(
f"Could not construct DataFrame from {type(dataframe)}"
)
@classmethod
@_cudf_nvtx_annotate
def from_arrow(cls, table):
"""
Convert from PyArrow Table to DataFrame.
Parameters
----------
table : PyArrow Table Object
PyArrow Table Object which has to be converted to cudf DataFrame.
Raises
------
TypeError for invalid input type.
Returns
-------
cudf DataFrame
Notes
-----
- Does not support automatically setting index column(s) similar
to how ``to_pandas`` works for PyArrow Tables.
Examples
--------
>>> import cudf
>>> import pyarrow as pa
>>> data = pa.table({"a":[1, 2, 3], "b":[4, 5, 6]})
>>> cudf.DataFrame.from_arrow(data)
a b
0 1 4
1 2 5
2 3 6
"""
index_col = None
col_index_names = None
if isinstance(table, pa.Table) and isinstance(
table.schema.pandas_metadata, dict
):
index_col = table.schema.pandas_metadata["index_columns"]
if "column_indexes" in table.schema.pandas_metadata:
col_index_names = []
for col_meta in table.schema.pandas_metadata["column_indexes"]:
col_index_names.append(col_meta["name"])
out = super().from_arrow(table)
if col_index_names is not None:
out._data._level_names = col_index_names
if index_col:
if isinstance(index_col[0], dict):
idx = cudf.RangeIndex(
index_col[0]["start"],
index_col[0]["stop"],
name=index_col[0]["name"],
)
if len(idx) == len(out):
# `idx` is generated from arrow `pandas_metadata`,
# which can get out of date after many arrow
# operations. Hence we only set the index when the
# lengths match; otherwise no index is set, i.e.,
# the default RangeIndex will be used.
# See more about the discussion here:
# https://github.com/apache/arrow/issues/15178
out = out.set_index(idx)
else:
out = out.set_index(index_col[0])
return out
@_cudf_nvtx_annotate
def to_arrow(self, preserve_index=True):
"""
Convert to a PyArrow Table.
Parameters
----------
preserve_index : bool, default True
whether the index column and its metadata need to be saved or not
Returns
-------
PyArrow Table
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame(
... {"a":[1, 2, 3], "b":[4, 5, 6]}, index=[1, 2, 3])
>>> df.to_arrow()
pyarrow.Table
a: int64
b: int64
index: int64
----
a: [[1,2,3]]
b: [[4,5,6]]
index: [[1,2,3]]
>>> df.to_arrow(preserve_index=False)
pyarrow.Table
a: int64
b: int64
----
a: [[1,2,3]]
b: [[4,5,6]]
"""
data = self.copy(deep=False)
index_descr = []
if preserve_index:
if isinstance(self.index, cudf.RangeIndex):
descr = {
"kind": "range",
"name": self.index.name,
"start": self.index._start,
"stop": self.index._stop,
"step": 1,
}
else:
if isinstance(self.index, MultiIndex):
gen_names = tuple(
f"level_{i}"
for i, _ in enumerate(self.index._data.names)
)
else:
gen_names = (
self.index.names
if self.index.name is not None
else ("index",)
)
for gen_name, col_name in zip(
gen_names, self.index._data.names
):
data._insert(
data.shape[1],
gen_name,
self.index._data[col_name],
)
descr = gen_names[0]
index_descr.append(descr)
out = super(DataFrame, data).to_arrow()
metadata = pa.pandas_compat.construct_metadata(
columns_to_convert=[self[col] for col in self._data.names],
df=self,
column_names=out.schema.names,
index_levels=[self.index],
index_descriptors=index_descr,
preserve_index=preserve_index,
types=out.schema.types,
)
return out.replace_schema_metadata(metadata)
@_cudf_nvtx_annotate
def to_records(self, index=True):
"""Convert to a numpy recarray
Parameters
----------
index : bool
Whether to include the index in the output.
Returns
-------
numpy recarray
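Examples
--------
A minimal sketch; the frame below is illustrative only:
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2], 'b': [10.0, 20.0]})
>>> rec = df.to_records()
>>> rec.dtype.names
('index', 'a', 'b')
>>> rec['a'].tolist()
[1, 2]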
"""
members = [("index", self.index.dtype)] if index else []
members += [(col, self[col].dtype) for col in self._data.names]
dtype = np.dtype(members)
ret = np.recarray(len(self), dtype=dtype)
if index:
ret["index"] = self.index.to_numpy()
for col in self._data.names:
ret[col] = self[col].to_numpy()
return ret
@classmethod
@_cudf_nvtx_annotate
def from_records(cls, data, index=None, columns=None, nan_as_null=False):
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : numpy structured dtype or recarray of ndim=2
index : str, array-like
The name of the index column in *data*.
If None, the default index is used.
columns : list of str
List of column names to include.
Returns
-------
DataFrame
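Examples
--------
A minimal sketch using a hypothetical structured array:
>>> import numpy as np
>>> import cudf
>>> rec = np.array([(1, 10.0), (2, 20.0)], dtype=[('x', 'i8'), ('y', 'f8')])
>>> gdf = cudf.DataFrame.from_records(rec)
>>> gdf.columns.tolist()
['x', 'y']
>>> gdf.shape
(2, 2)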
"""
if data.ndim != 1 and data.ndim != 2:
raise ValueError(
f"records dimension expected 1 or 2 but found {data.ndim}"
)
num_cols = len(data[0])
if columns is None and data.dtype.names is None:
names = [i for i in range(num_cols)]
elif data.dtype.names is not None:
names = data.dtype.names
else:
if len(columns) != num_cols:
raise ValueError(
f"columns length expected {num_cols} "
f"but found {len(columns)}"
)
names = columns
df = DataFrame()
if data.ndim == 2:
for i, k in enumerate(names):
df._data[k] = column.as_column(
data[:, i], nan_as_null=nan_as_null
)
elif data.ndim == 1:
for k in names:
df._data[k] = column.as_column(
data[k], nan_as_null=nan_as_null
)
if index is None:
df._index = RangeIndex(start=0, stop=len(data))
elif is_scalar(index):
df._index = RangeIndex(start=0, stop=len(data))
df = df.set_index(index)
else:
df._index = as_index(index)
if isinstance(columns, pd.Index):
df._data._level_names = tuple(columns.names)
return df
@classmethod
@_cudf_nvtx_annotate
def _from_arrays(cls, data, index=None, columns=None, nan_as_null=False):
"""Convert a numpy/cupy array to DataFrame.
Parameters
----------
data : numpy/cupy array of ndim 1 or 2,
dimensions greater than 2 are not supported yet.
index : Index or array-like
Index to use for resulting frame. Will default to
RangeIndex if no indexing information part of input data and
no index provided.
columns : list of str
List of column names to include.
Returns
-------
DataFrame
"""
data = cupy.asarray(data)
if data.ndim != 1 and data.ndim != 2:
raise ValueError(
f"records dimension expected 1 or 2 but found: {data.ndim}"
)
if data.ndim == 2:
num_cols = data.shape[1]
else:
# Since we validate ndim to be either 1 or 2 above,
# this case can be assumed to be ndim == 1.
num_cols = 1
if columns is None:
names = range(num_cols)
else:
if len(columns) != num_cols:
raise ValueError(
f"columns length expected {num_cols} but "
f"found {len(columns)}"
)
elif len(columns) != len(set(columns)):
raise ValueError("Duplicate column names are not allowed")
names = columns
df = cls()
if data.ndim == 2:
for i, k in enumerate(names):
df._data[k] = column.as_column(
data[:, i], nan_as_null=nan_as_null
)
elif data.ndim == 1:
df._data[names[0]] = column.as_column(
data, nan_as_null=nan_as_null
)
if isinstance(columns, pd.Index):
df._data._level_names = tuple(columns.names)
if isinstance(columns, (range, pd.RangeIndex, cudf.RangeIndex)):
df._data.rangeindex = True
if index is None:
df._index = RangeIndex(start=0, stop=len(data))
else:
df._index = as_index(index)
return df
@_cudf_nvtx_annotate
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction=None,
limit_area=None,
downcast=None,
**kwargs,
):
if all(dt == np.dtype("object") for dt in self.dtypes):
raise TypeError(
"Cannot interpolate with all object-dtype "
"columns in the DataFrame. Try setting at "
"least one column to a numeric dtype."
)
return super().interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
@_cudf_nvtx_annotate
def quantile(
self,
q=0.5,
axis=0,
numeric_only=True,
interpolation=None,
columns=None,
exact=True,
method="single",
):
"""
Return values at the given quantile.
Parameters
----------
q : float or array-like
0 <= q <= 1, the quantile(s) to compute
axis : int
axis is a NON-FUNCTIONAL parameter
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This parameter specifies the interpolation method to use,
when the desired quantile lies between two data points i and j.
Default is ``'linear'`` for ``method="single"``, and ``'nearest'``
for ``method="table"``.
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
columns : list of str
List of column names to include.
exact : boolean
Whether to use approximate or exact quantile algorithm.
method : {'single', 'table'}, default `'single'`
Whether to compute quantiles per-column ('single') or over all
columns ('table'). When 'table', the only allowed interpolation
methods are 'nearest', 'lower', and 'higher'.
Returns
-------
Series or DataFrame
If q is an array or numeric_only is set to False, a DataFrame
will be returned where index is q, the columns are the columns
of self, and the values are the quantiles.
If q is a float, a Series will be returned where the index is
the columns of self and the values are the quantiles.
.. pandas-compat::
**DataFrame.quantile**
One notable difference from Pandas: for a DataFrame of
non-numeric types, Pandas would return a Series, whereas
cuDF returns a DataFrame because it does not support mixed
dtypes within a Series.
Examples
--------
>>> import cupy as cp
>>> import cudf
>>> df = cudf.DataFrame(cp.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df
a b
0 1 1
1 2 10
2 3 100
3 4 100
>>> df.quantile(0.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
""" # noqa: E501
if axis not in (0, None):
raise NotImplementedError("axis is not implemented yet")
data_df = self
if numeric_only:
data_df = data_df.select_dtypes(
include=[np.number], exclude=["datetime64", "timedelta64"]
)
if columns is None:
columns = data_df._data.names
if isinstance(q, numbers.Number):
q_is_number = True
qs = [float(q)]
elif pd.api.types.is_list_like(q):
q_is_number = False
qs = q
else:
msg = "`q` must be either a single element or list"
raise TypeError(msg)
if method == "table":
interpolation = interpolation or "nearest"
result = self._quantile_table(qs, interpolation.upper())
if q_is_number:
result = result.transpose()
return Series(
data=result._columns[0], index=result.index, name=q
)
else:
# Ensure that qs is non-scalar so that we always get a column back.
interpolation = interpolation or "linear"
result = {}
for k in data_df._data.names:
if k in columns:
ser = data_df[k]
res = ser.quantile(
qs,
interpolation=interpolation,
exact=exact,
quant_index=False,
)._column
if len(res) == 0:
res = column.column_empty_like(
qs, dtype=ser.dtype, masked=True, newsize=len(qs)
)
result[k] = res
result = DataFrame._from_data(result)
if q_is_number and numeric_only:
result = result.fillna(np.nan).iloc[0]
result.index = data_df.keys()
result.name = q
return result
result.index = cudf.Index(list(map(float, qs)), dtype="float64")
return result
@_cudf_nvtx_annotate
def isin(self, values):
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all
the labels match. If values is a Series, that's the index.
If values is a dict, the keys must be the column names,
which must match. If values is a DataFrame, then both the
index and column labels must match.
Returns
-------
DataFrame:
DataFrame of booleans showing whether each element in
the DataFrame is contained in values.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in other.
>>> other = cudf.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
# TODO: propagate nulls through isin
# https://github.com/rapidsai/cudf/issues/7556
fill_value = cudf.Scalar(False)
def make_false_column_like_self():
return column.full(len(self), fill_value, "bool")
# Preprocess different input types into a mapping from column names to
# a list of values to check.
result = {}
if isinstance(values, IndexedFrame):
# Note: In the case where values is a Series, computing some
# information about the values column outside the loop may result
# in performance gains. However, since categorical conversion
# depends on the current column in the loop, using the correct
# precomputed variables inside the loop requires nontrivial logic.
# This optimization could be attempted if `isin` ever becomes a
# bottleneck.
if (
isinstance(values, (Series, DataFrame))
and not values.index.is_unique
):
# if DataFrame ever supports duplicate columns
# would need to check that here
raise ValueError("cannot compute isin with a duplicate axis.")
values = values.reindex(self.index)
other_cols = (
values._data
if isinstance(values, DataFrame)
else {name: values._column for name in self._data}
)
for col, self_col in self._data.items():
if col in other_cols:
other_col = other_cols[col]
self_is_cat = isinstance(self_col, CategoricalColumn)
other_is_cat = isinstance(other_col, CategoricalColumn)
if self_is_cat != other_is_cat:
# It is valid to compare the levels of a categorical
# column to a non-categorical column.
if self_is_cat:
self_col = self_col._get_decategorized_column()
else:
other_col = other_col._get_decategorized_column()
# We use the type checks from _before_ the conversion
# because if only one was categorical then it's already
# been converted and we have to check if they're strings.
if self_is_cat and other_is_cat:
self_is_str = other_is_str = False
else:
# These checks must happen after the conversions above
# since numpy can't handle categorical dtypes.
self_is_str = is_string_dtype(self_col.dtype)
other_is_str = is_string_dtype(other_col.dtype)
if self_is_str != other_is_str:
# Strings can't compare to anything else.
result[col] = make_false_column_like_self()
else:
result[col] = (self_col == other_col).fillna(False)
else:
result[col] = make_false_column_like_self()
elif is_dict_like(values):
for name, col in self._data.items():
if name in values:
result[name] = col.isin(values[name])
else:
result[name] = make_false_column_like_self()
elif is_list_like(values):
for name, col in self._data.items():
result[name] = col.isin(values)
else:
raise TypeError(
"only list-like or dict-like objects are "
"allowed to be passed to DataFrame.isin(), "
"you passed a "
f"'{type(values).__name__}'"
)
# TODO: Update this logic to properly preserve MultiIndex columns.
return DataFrame._from_data(result, self.index)
#
# Stats
#
@_cudf_nvtx_annotate
def _prepare_for_rowwise_op(self, method, skipna):
"""Prepare a DataFrame for CuPy-based row-wise operations."""
if method not in _cupy_nan_methods_map and any(
col.nullable for col in self._columns
):
msg = (
f"Row-wise operations to calculate '{method}' do not "
f"currently support columns with null values. "
f"Consider removing them with .dropna() "
f"or using .fillna()."
)
raise ValueError(msg)
is_pure_dt = all(is_datetime_dtype(dt) for dt in self.dtypes)
if not is_pure_dt:
filtered = self.select_dtypes(include=[np.number, np.bool_])
else:
filtered = self.copy(deep=False)
common_dtype = find_common_type(filtered.dtypes)
if filtered._num_columns < self._num_columns:
# When we update our pandas compatibility target to 2.0, pandas
# will stop supporting numeric_only=None and users will have to
# specify True/False. At that time we should also top our implicit
# removal of non-numeric columns here.
assert Version(pd.__version__) < Version("2.0.0")
msg = (
"Row-wise operations currently only support int, float "
"and bool dtypes. Non numeric columns are ignored."
)
warnings.warn(msg)
if not skipna and any(col.nullable for col in filtered._columns):
mask = DataFrame(
{
name: filtered._data[name]._get_mask_as_column()
if filtered._data[name].nullable
else column.full(len(filtered._data[name]), True)
for name in filtered._data.names
}
)
mask = mask.all(axis=1)
else:
mask = None
coerced = filtered.astype(common_dtype, copy=False)
if is_pure_dt:
# Further convert into cupy friendly types
coerced = coerced.astype("int64", copy=False)
return coerced, mask, common_dtype
@_cudf_nvtx_annotate
def count(self, axis=0, level=None, numeric_only=False, **kwargs):
"""
Count ``non-NA`` cells for each column or row.
The values ``None``, ``NaN``, ``NaT`` are considered ``NA``.
Returns
-------
Series
For each column/row the number of non-NA/null entries.
Notes
-----
Parameters currently not supported are `axis`, `level`, `numeric_only`.
Examples
--------
>>> import cudf
>>> import numpy as np
>>> df = cudf.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
"""
axis = self._get_axis_from_axis_arg(axis)
if axis != 0:
raise NotImplementedError("Only axis=0 is currently supported.")
return Series._from_data(
{None: [self._data[col].valid_count for col in self._data.names]},
as_index(self._data.names),
)
_SUPPORT_AXIS_LOOKUP = {
0: 0,
1: 1,
"index": 0,
"columns": 1,
}
@_cudf_nvtx_annotate
def _reduce(
self,
op,
axis=None,
level=None,
numeric_only=None,
**kwargs,
):
if level is not None:
raise NotImplementedError("level parameter is not implemented yet")
source = self
if numeric_only:
numeric_cols = (
name
for name in self._data.names
if is_numeric_dtype(self._data[name])
)
source = self._get_columns_by_label(numeric_cols)
if source.empty:
return Series(index=cudf.Index([], dtype="str"))
if axis is None:
if op in {"any", "all"}:
axis = 2
else:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
f"In a future version, {type(self).__name__}"
f".{op}(axis=None) will return a scalar {op} over "
"the entire DataFrame. To retain the old behavior, "
f"use '{type(self).__name__}.{op}(axis=0)' or "
f"just '{type(self)}.{op}()'",
FutureWarning,
)
axis = 0
elif axis is no_default:
axis = 0
else:
axis = source._get_axis_from_axis_arg(axis)
if axis in {0, 2}:
try:
result = [
getattr(source._data[col], op)(**kwargs)
for col in source._data.names
]
except AttributeError:
if numeric_only is None and op in _numeric_reduction_ops:
# Do not remove until pandas 2.0 support is added.
warnings.warn(
f"The default value of numeric_only in DataFrame.{op} "
"is deprecated. In a future version, it will default "
"to False. In addition, specifying "
"'numeric_only=None' is deprecated. Select only valid "
"columns or specify the value of numeric_only to "
"silence this warning.",
FutureWarning,
)
numeric_cols = (
name
for name in self._data.names
if is_numeric_dtype(self._data[name])
)
source = self._get_columns_by_label(numeric_cols)
if source.empty:
if axis == 2:
return getattr(as_column([]), op)(**kwargs)
else:
return Series(index=cudf.Index([], dtype="str"))
try:
result = [
getattr(source._data[col], op)(**kwargs)
for col in source._data.names
]
except AttributeError:
raise TypeError(
f"Not all column dtypes support op {op}"
)
else:
raise
if axis == 2:
return getattr(as_column(result), op)(**kwargs)
else:
source_dtypes = [c.dtype for c in source._data.columns]
common_dtype = find_common_type(source_dtypes)
if is_object_dtype(common_dtype) and any(
not is_object_dtype(dtype) for dtype in source_dtypes
):
raise TypeError(
"Columns must all have the same dtype to "
f"perform {op=} with {axis=}"
)
return Series._from_data(
{None: as_column(result)}, as_index(source._data.names)
)
elif axis == 1:
return source._apply_cupy_method_axis_1(op, **kwargs)
else:
raise ValueError(f"Invalid value of {axis=} received for {op}")
@_cudf_nvtx_annotate
def _scan(
self,
op,
axis=None,
*args,
**kwargs,
):
if axis is None:
axis = 0
axis = self._get_axis_from_axis_arg(axis)
if axis == 0:
return super()._scan(op, axis=axis, *args, **kwargs)
elif axis == 1:
return self._apply_cupy_method_axis_1(op, **kwargs)
@_cudf_nvtx_annotate
def mode(self, axis=0, numeric_only=False, dropna=True):
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
- 0 or 'index' : get mode of each column
- 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NA/NaN/NaT.
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
cudf.Series.mode : Return the highest frequency value
in a Series.
cudf.Series.value_counts : Return the counts of values
in a Series.
Notes
-----
``axis`` parameter is currently not supported.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({
... "species": ["bird", "mammal", "arthropod", "bird"],
... "legs": [2, 4, 8, 2],
... "wings": [2.0, None, 0.0, None]
... })
>>> df
species legs wings
0 bird 2 2.0
1 mammal 4 <NA>
2 arthropod 8 0.0
3 bird 2 <NA>
By default, missing values are not considered, and the mode of wings
are both 0 and 2. The second row of species and legs contains ``NA``,
because they have only one mode, but the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2 0.0
1 <NA> <NA> 2.0
Setting ``dropna=False``, ``NA`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 <NA>
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2 0.0
1 <NA> 2.0
"""
if axis not in (0, "index"):
raise NotImplementedError("Only axis=0 is currently supported")
if numeric_only:
data_df = self.select_dtypes(
include=[np.number], exclude=["datetime64", "timedelta64"]
)
else:
data_df = self
mode_results = [
data_df[col].mode(dropna=dropna) for col in data_df._data
]
if len(mode_results) == 0:
return DataFrame()
df = cudf.concat(mode_results, axis=1)
if isinstance(df, Series):
df = df.to_frame()
df._set_column_names_like(data_df)
return df
@_cudf_nvtx_annotate
def all(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
obj = self.select_dtypes(include="bool") if bool_only else self
return super(DataFrame, obj).all(axis, skipna, level, **kwargs)
@_cudf_nvtx_annotate
def any(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
obj = self.select_dtypes(include="bool") if bool_only else self
return super(DataFrame, obj).any(axis, skipna, level, **kwargs)
@_cudf_nvtx_annotate
def _apply_cupy_method_axis_1(self, method, *args, **kwargs):
# This method uses cupy to perform scans and reductions along rows of a
# DataFrame. Since cuDF is designed around columnar storage and
# operations, we convert DataFrames to 2D cupy arrays for these ops.
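# Illustrative sketch (an assumption about typical call flow, not executed
# here): a call such as ``df.sum(axis=1)`` is routed to this method, which
# roughly does ``cupy.sum(df.to_cupy(), axis=1)`` and wraps the result back
# into a Series aligned with ``df.index``.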
# for dask metadata compatibility
skipna = kwargs.pop("skipna", None)
skipna = True if skipna is None else skipna
if method not in _cupy_nan_methods_map and skipna not in (
None,
True,
1,
):
raise NotImplementedError(
f"Row-wise operations to calculate '{method}'"
f" currently do not support `skipna=False`."
)
level = kwargs.pop("level", None)
if level not in (None,):
raise NotImplementedError(
"Row-wise operations currently do not support `level`."
)
numeric_only = kwargs.pop("numeric_only", None)
if numeric_only not in (None, True):
raise NotImplementedError(
"Row-wise operations currently do not "
"support `numeric_only=False`."
)
min_count = kwargs.pop("min_count", None)
if min_count not in (None, 0):
raise NotImplementedError(
"Row-wise operations currently do not support `min_count`."
)
bool_only = kwargs.pop("bool_only", None)
if bool_only not in (None, True):
raise NotImplementedError(
"Row-wise operations currently do not support `bool_only`."
)
# This parameter is only necessary for axis 0 reductions that cuDF
# performs internally. cupy already upcasts smaller integer/bool types
# to int64 when accumulating.
kwargs.pop("cast_to_int", None)
prepared, mask, common_dtype = self._prepare_for_rowwise_op(
method, skipna
)
for col in prepared._data.names:
if prepared._data[col].nullable:
prepared._data[col] = (
prepared._data[col]
.astype(
cudf.utils.dtypes.get_min_float_dtype(
prepared._data[col]
)
if not is_datetime_dtype(common_dtype)
else cudf.dtype("float64")
)
.fillna(np.nan)
)
arr = prepared.to_cupy()
if skipna is not False and method in _cupy_nan_methods_map:
method = _cupy_nan_methods_map[method]
result = getattr(cupy, method)(arr, axis=1, **kwargs)
if result.ndim == 1:
type_coerced_methods = {
"count",
"min",
"max",
"sum",
"prod",
"cummin",
"cummax",
"cumsum",
"cumprod",
}
result_dtype = (
common_dtype
if method in type_coerced_methods
or is_datetime_dtype(common_dtype)
else None
)
result = column.as_column(result, dtype=result_dtype)
if mask is not None:
result = result.set_mask(
cudf._lib.transform.bools_to_mask(mask._column)
)
return Series(
result,
index=self.index,
dtype=result_dtype,
)
else:
result_df = DataFrame(result).set_index(self.index)
result_df._set_column_names_like(prepared)
return result_df
@_cudf_nvtx_annotate
def _columns_view(self, columns):
"""
Return a subset of the DataFrame's columns as a view.
"""
return DataFrame(
{col: self._data[col] for col in columns}, index=self.index
)
@_cudf_nvtx_annotate
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include : str or list
which columns to include based on dtypes
exclude : str or list
which columns to exclude based on dtypes
Returns
-------
DataFrame
The subset of the frame including the dtypes
in ``include`` and excluding the dtypes in ``exclude``.
Raises
------
ValueError
- If both of ``include`` and ``exclude`` are empty
- If ``include`` and ``exclude`` have overlapping elements
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
""" # noqa: E501
# code modified from:
# https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py#L3196
if not isinstance(include, (list, tuple)):
include = (include,) if include is not None else ()
if not isinstance(exclude, (list, tuple)):
exclude = (exclude,) if exclude is not None else ()
df = DataFrame(index=self.index)
# cudf_dtype_from_pydata_dtype can distinguish between
# np.float and np.number
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError(
"at least one of include or exclude must be nonempty"
)
include, exclude = map(
lambda x: frozenset(map(cudf_dtype_from_pydata_dtype, x)),
selection,
)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(
f"include and exclude overlap on {(include & exclude)}"
)
# include all subtypes
include_subtypes = set()
for dtype in self.dtypes:
for i_dtype in include:
# category handling
if is_categorical_dtype(i_dtype):
include_subtypes.add(i_dtype)
elif inspect.isclass(dtype.type):
if issubclass(dtype.type, i_dtype):
include_subtypes.add(dtype.type)
# exclude all subtypes
exclude_subtypes = set()
for dtype in self.dtypes:
for e_dtype in exclude:
# category handling
if is_categorical_dtype(e_dtype):
exclude_subtypes.add(e_dtype)
elif inspect.isclass(dtype.type):
if issubclass(dtype.type, e_dtype):
exclude_subtypes.add(dtype.type)
include_all = {cudf_dtype_from_pydata_dtype(d) for d in self.dtypes}
if include:
inclusion = include_all & include_subtypes
elif exclude:
inclusion = include_all
else:
inclusion = set()
# remove all exclude types
inclusion = inclusion - exclude_subtypes
for k, col in self._data.items():
inferred_type = cudf_dtype_from_pydata_dtype(col.dtype)
if inferred_type in inclusion:
df._insert(len(df._data), k, col)
return df
@ioutils.doc_to_parquet()
def to_parquet(
self,
path,
engine="cudf",
compression="snappy",
index=None,
partition_cols=None,
partition_file_name=None,
partition_offsets=None,
statistics="ROWGROUP",
metadata_file_path=None,
int96_timestamps=False,
row_group_size_bytes=ioutils._ROW_GROUP_SIZE_BYTES_DEFAULT,
row_group_size_rows=None,
max_page_size_bytes=None,
max_page_size_rows=None,
storage_options=None,
return_metadata=False,
use_dictionary=True,
header_version="1.0",
*args,
**kwargs,
):
"""{docstring}"""
from cudf.io import parquet
return parquet.to_parquet(
self,
path=path,
engine=engine,
compression=compression,
index=index,
partition_cols=partition_cols,
partition_file_name=partition_file_name,
partition_offsets=partition_offsets,
statistics=statistics,
metadata_file_path=metadata_file_path,
int96_timestamps=int96_timestamps,
row_group_size_bytes=row_group_size_bytes,
row_group_size_rows=row_group_size_rows,
max_page_size_bytes=max_page_size_bytes,
max_page_size_rows=max_page_size_rows,
storage_options=storage_options,
return_metadata=return_metadata,
use_dictionary=use_dictionary,
header_version=header_version,
*args,
**kwargs,
)
@ioutils.doc_to_feather()
def to_feather(self, path, *args, **kwargs):
"""{docstring}"""
from cudf.io import feather
feather.to_feather(self, path, *args, **kwargs)
@ioutils.doc_dataframe_to_csv()
def to_csv(
self,
path_or_buf=None,
sep=",",
na_rep="",
columns=None,
header=True,
index=True,
encoding=None,
compression=None,
lineterminator=None,
chunksize=None,
storage_options=None,
):
"""{docstring}"""
from cudf.io import csv
if lineterminator is None:
lineterminator = os.linesep
return csv.to_csv(
self,
path_or_buf=path_or_buf,
sep=sep,
na_rep=na_rep,
columns=columns,
header=header,
index=index,
lineterminator=lineterminator,
chunksize=chunksize,
encoding=encoding,
compression=compression,
storage_options=storage_options,
)
@ioutils.doc_to_orc()
def to_orc(
self,
fname,
compression="snappy",
statistics="ROWGROUP",
stripe_size_bytes=None,
stripe_size_rows=None,
row_index_stride=None,
cols_as_map_type=None,
storage_options=None,
index=None,
):
"""{docstring}"""
from cudf.io import orc
return orc.to_orc(
df=self,
fname=fname,
compression=compression,
statistics=statistics,
stripe_size_bytes=stripe_size_bytes,
stripe_size_rows=stripe_size_rows,
row_index_stride=row_index_stride,
cols_as_map_type=cols_as_map_type,
storage_options=storage_options,
index=index,
)
@_cudf_nvtx_annotate
def stack(self, level=-1, dropna=True):
"""Stack the prescribed level(s) from columns to index
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to
the current DataFrame. The new inner-most levels are created
by pivoting the columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
Parameters
----------
level : int, str, list default -1
Level(s) to stack from the column axis onto the index axis,
defined as one index or label, or a list of indices or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with missing
values. When multiple levels are specified, `dropna==False` is
unsupported.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = cudf.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat height 1
weight 0
dog height 3
weight 2
dtype: int64
**Multi level columns: simple case**
>>> import pandas as pd
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = cudf.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = cudf.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NULLs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg <NA> 1.0
m 2.0 <NA>
dog kg <NA> 3.0
m 4.0 <NA>
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height <NA> 2.0
weight 1.0 <NA>
dog height <NA> 4.0
weight 3.0 <NA>
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
"""
if isinstance(level, (int, str)):
level = [level]
elif isinstance(level, list):
if not all(isinstance(lv, (int, str)) for lv in level):
raise ValueError(
"level must be either an int/str, or a list of int/str."
)
else:
raise ValueError(
"level must be either an int/str, or a list of int/str."
)
level = [level] if not isinstance(level, list) else level
if len(level) > 1 and not dropna:
raise NotImplementedError(
"When stacking multiple levels, setting `dropna` to False "
"will generate new column combination that does not exist "
"in original dataframe. This behavior is unsupported in "
"cuDF. See pandas deprecation note: "
"https://github.com/pandas-dev/pandas/issues/53515"
)
# Compute the columns to stack based on specified levels
level_indices: list[int] = []
# If all passed in level names match up to the dataframe column's level
# names, cast them to indices
if all(lv in self._data.level_names for lv in level):
level_indices = [self._data.level_names.index(lv) for lv in level]
elif not all(isinstance(lv, int) for lv in level):
raise ValueError(
"`level` must either be a list of names or positions, not a "
"mixture of both."
)
else:
# Must be a list of positions, normalize negative positions
level_indices = [
lv + self._data.nlevels if lv < 0 else lv for lv in level
]
unnamed_levels_indices = [
i for i in range(self._data.nlevels) if i not in level_indices
]
has_unnamed_levels = len(unnamed_levels_indices) > 0
column_name_idx = self._data.to_pandas_index()
# Construct new index from the levels specified by `level`
named_levels = pd.MultiIndex.from_arrays(
[column_name_idx.get_level_values(lv) for lv in level_indices]
)
# Since `level` may only specify a subset of all levels, `unique()` is
# required to remove duplicates. In pandas, the order of the keys in
# the specified levels are always sorted.
unique_named_levels = named_levels.unique().sort_values()
# Each index from the original dataframe should repeat by the number
# of unique values in the named_levels
repeated_index = self.index.repeat(len(unique_named_levels))
# Each column name should be tiled len(df) times
tiled_index = libcudf.reshape.tile(
[
as_column(unique_named_levels.get_level_values(i))
for i in range(unique_named_levels.nlevels)
],
self.shape[0],
)
# Assemble the final index
new_index_columns = [*repeated_index._columns, *tiled_index]
index_names = [*self._index.names, *unique_named_levels.names]
new_index = MultiIndex.from_frame(
DataFrame._from_data(
dict(zip(range(0, len(new_index_columns)), new_index_columns))
),
names=index_names,
)
# Compute the column indices that serve as the input for
# `interleave_columns`
column_idx_df = pd.DataFrame(
data=range(len(self._data)), index=named_levels
)
column_indices: list[list[int]] = []
if has_unnamed_levels:
unnamed_level_values = list(
map(column_name_idx.get_level_values, unnamed_levels_indices)
)
unnamed_level_values = pd.MultiIndex.from_arrays(
unnamed_level_values
)
def unnamed_group_generator():
if has_unnamed_levels:
for _, grpdf in column_idx_df.groupby(by=unnamed_level_values):
# When stacking part of the levels, some combinations
# of keys may not be present in this group but can be
# present in others. Reindexing with the globally computed
# `unique_named_levels` assigns -1 to these key
# combinations, representing an all-null column that
# is used in the subsequent libcudf call.
yield grpdf.reindex(
unique_named_levels, axis=0, fill_value=-1
).sort_index().values
else:
yield column_idx_df.sort_index().values
column_indices = list(unnamed_group_generator())
# For each of the group constructed from the unnamed levels,
# invoke `interleave_columns` to stack the values.
stacked = []
for column_idx in column_indices:
# Collect columns based on indices, append None for -1 indices.
columns = [
None if i == -1 else self._data.select_by_index(i).columns[0]
for i in column_idx
]
# Collect datatypes and cast columns as that type
common_type = np.result_type(
*(col.dtype for col in columns if col is not None)
)
all_nulls = functools.cache(
functools.partial(
column_empty, self.shape[0], common_type, masked=True
)
)
# homogenize the dtypes of the columns
homogenized = [
col.astype(common_type) if col is not None else all_nulls()
for col in columns
]
stacked.append(libcudf.reshape.interleave_columns(homogenized))
# Construct the resulting dataframe / series
if not has_unnamed_levels:
result = Series._from_data(
data={None: stacked[0]}, index=new_index
)
else:
if unnamed_level_values.nlevels == 1:
unnamed_level_values = unnamed_level_values.get_level_values(0)
unnamed_level_values = unnamed_level_values.unique().sort_values()
data = ColumnAccessor(
dict(zip(unnamed_level_values, stacked)),
isinstance(unnamed_level_values, pd.MultiIndex),
unnamed_level_values.names,
)
result = DataFrame._from_data(data, index=new_index)
if dropna:
return result.dropna(how="all")
else:
return result
@_cudf_nvtx_annotate
def cov(self, **kwargs):
"""Compute the covariance matrix of a DataFrame.
Parameters
----------
**kwargs
Keyword arguments to be passed to cupy.cov
Returns
-------
cov : DataFrame
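Examples
--------
A minimal sketch; the column names and values below are illustrative:
>>> import cudf
>>> df = cudf.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [2.0, 4.0, 6.0]})
>>> df.cov()
     a    b
a  1.0  2.0
b  2.0  4.0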
"""
cov = cupy.cov(self.values, rowvar=False)
cols = self._data.to_pandas_index()
df = DataFrame(cupy.asfortranarray(cov)).set_index(cols)
df._set_column_names_like(self)
return df
def corr(self, method="pearson", min_periods=None):
"""Compute the correlation matrix of a DataFrame.
Parameters
----------
method : {'pearson', 'spearman'}, default 'pearson'
Method used to compute correlation:
- pearson : Standard correlation coefficient
- spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns to
have a valid result.
Returns
-------
DataFrame
The requested correlation matrix.
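Examples
--------
A minimal sketch; the frame below is illustrative:
>>> import cudf
>>> df = cudf.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [2.0, 4.0, 6.0]})
>>> df.corr()
     a    b
a  1.0  1.0
b  1.0  1.0
>>> df.corr(method='spearman')
     a    b
a  1.0  1.0
b  1.0  1.0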
"""
if method == "pearson":
values = self.values
elif method == "spearman":
values = self.rank().values
else:
raise ValueError("method must be either 'pearson', 'spearman'")
if min_periods is not None:
raise NotImplementedError("Unsupported argument 'min_periods'")
corr = cupy.corrcoef(values, rowvar=False)
cols = self._data.to_pandas_index()
df = DataFrame(cupy.asfortranarray(corr)).set_index(cols)
df._set_column_names_like(self)
return df
@_cudf_nvtx_annotate
def to_struct(self, name=None):
"""
Return a struct Series composed of the columns of the DataFrame.
Parameters
----------
name: optional
Name of the resulting Series
Notes
-----
Note that a copy of the columns is made.
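Examples
--------
A minimal sketch; the column names are illustrative:
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
>>> s = df.to_struct(name='rec')
>>> s.name
'rec'
>>> len(s) == len(df)
True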
"""
if not all(isinstance(name, str) for name in self._data.names):
warnings.warn(
"DataFrame contains non-string column name(s). Struct column "
"requires field name to be string. Non-string column names "
"will be casted to string as the field name."
)
field_names = [str(name) for name in self._data.names]
col = cudf.core.column.build_struct_column(
names=field_names,
children=tuple(col.copy(deep=True) for col in self._data.columns),
size=len(self),
)
return cudf.Series._from_data(
cudf.core.column_accessor.ColumnAccessor({name: col}),
index=self.index,
name=name,
)
@_cudf_nvtx_annotate
def keys(self):
"""
Get the columns.
This is index for Series, columns for DataFrame.
Returns
-------
Index
Columns of DataFrame.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'one' : [1, 2, 3], 'five' : ['a', 'b', 'c']})
>>> df
one five
0 1 a
1 2 b
2 3 c
>>> df.keys()
Index(['one', 'five'], dtype='object')
>>> df = cudf.DataFrame(columns=[0, 1, 2, 3])
>>> df
Empty DataFrame
Columns: [0, 1, 2, 3]
Index: []
>>> df.keys()
Int64Index([0, 1, 2, 3], dtype='int64')
"""
return self._data.to_pandas_index()
def itertuples(self, index=True, name="Pandas"):
"""
Iteration is unsupported.
See :ref:`iteration <pandas-comparison/iteration>` for more
information.
"""
raise TypeError(
"cuDF does not support iteration of DataFrame "
"via itertuples. Consider using "
"`.to_pandas().itertuples()` "
"if you wish to iterate over namedtuples."
)
def iterrows(self):
"""
Iteration is unsupported.
See :ref:`iteration <pandas-comparison/iteration>` for more
information.
"""
raise TypeError(
"cuDF does not support iteration of DataFrame "
"via iterrows. Consider using "
"`.to_pandas().iterrows()` "
"if you wish to iterate over each row."
)
@_cudf_nvtx_annotate
def append(
self, other, ignore_index=False, verify_integrity=False, sort=False
):
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, do not use the index labels.
sort : bool, default False
Sort columns ordering if the columns of
`self` and `other` are not aligned.
verify_integrity : bool, default False
This Parameter is currently not supported.
Returns
-------
DataFrame
See Also
--------
cudf.concat : General function to concatenate DataFrame or
objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a cudf DataFrame can be more
computationally intensive than a single concatenate. A better
solution is to append those rows to a list and then concatenate
the list with the original DataFrame all at once.
`verify_integrity` parameter is not supported yet.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = cudf.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df2
A B
0 5 6
1 7 8
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended methods for generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = cudf.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient than above:
>>> cudf.concat([cudf.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = DataFrame(other)
elif isinstance(other, Series):
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
current_cols = self._data.to_pandas_index()
combined_columns = other.index.to_pandas()
if len(current_cols):
if cudf.utils.dtypes.is_mixed_with_object_dtype(
current_cols, combined_columns
):
raise TypeError(
"cudf does not support mixed types, please type-cast "
"the column index of dataframe and index of series "
"to same dtypes."
)
combined_columns = current_cols.union(
combined_columns, sort=False
)
if sort:
combined_columns = combined_columns.sort_values()
other = other.reindex(combined_columns, copy=False).to_frame().T
if not current_cols.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif (
isinstance(other, list)
and other
and not isinstance(other[0], DataFrame)
):
other = DataFrame(other)
cols = self._data.to_pandas_index()
if (cols.get_indexer(other._data.to_pandas_index()) >= 0).all():
other = other.reindex(columns=cols)
return super()._append(other, ignore_index, verify_integrity, sort)
@_cudf_nvtx_annotate
@copy_docstring(reshape.pivot)
def pivot(self, index, columns, values=None):
return cudf.core.reshape.pivot(
self, index=index, columns=columns, values=values
)
@_cudf_nvtx_annotate
@copy_docstring(reshape.pivot_table)
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=None,
margins_name="All",
observed=False,
sort=True,
):
return cudf.core.reshape.pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
sort=sort,
)
@_cudf_nvtx_annotate
@copy_docstring(reshape.unstack)
def unstack(self, level=-1, fill_value=None):
return cudf.core.reshape.unstack(
self, level=level, fill_value=fill_value
)
@_cudf_nvtx_annotate
def explode(self, column, ignore_index=False):
"""
Transform each element of a list-like to a row, replicating index
values.
Parameters
----------
column : str
Column to explode.
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
Returns
-------
DataFrame
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({
... "a": [[1, 2, 3], [], None, [4, 5]],
... "b": [11, 22, 33, 44],
... })
>>> df
a b
0 [1, 2, 3] 11
1 [] 22
2 None 33
3 [4, 5] 44
>>> df.explode('a')
a b
0 1 11
0 2 11
0 3 11
1 <NA> 22
2 <NA> 33
3 4 44
3 5 44
"""
if column not in self._column_names:
raise KeyError(column)
return super()._explode(column, ignore_index)
def pct_change(
self, periods=1, fill_method="ffill", limit=None, freq=None
):
"""
Calculates the percent change between sequential elements
in the DataFrame.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'ffill'
How to handle NAs before computing percent changes.
limit : int, optional
The number of consecutive NAs to fill before stopping.
Not yet implemented.
freq : str, optional
Increment to use from time series API.
Not yet implemented.
Returns
-------
DataFrame
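Examples
--------
A minimal sketch over a single illustrative column:
>>> import cudf
>>> df = cudf.DataFrame({'a': [2.0, 3.0, 6.0]})
>>> chg = df.pct_change()
>>> float(chg['a'].iloc[1])
0.5
>>> float(chg['a'].iloc[2])
1.0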
"""
if limit is not None:
raise NotImplementedError("limit parameter not supported yet.")
if freq is not None:
raise NotImplementedError("freq parameter not supported yet.")
elif fill_method not in {"ffill", "pad", "bfill", "backfill"}:
raise ValueError(
"fill_method must be one of 'ffill', 'pad', "
"'bfill', or 'backfill'."
)
data = self.fillna(method=fill_method, limit=limit)
return data.diff(periods=periods) / data.shift(
periods=periods, freq=freq
)
def __dataframe__(
self, nan_as_null: bool = False, allow_copy: bool = True
):
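# Entry point for the DataFrame interchange protocol. A hedged usage
# sketch (assuming a consumer library that implements the protocol):
# ``pandas.api.interchange.from_dataframe(gdf)`` would invoke this
# method on ``gdf`` to read its columns through the protocol.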
return df_protocol.__dataframe__(
self, nan_as_null=nan_as_null, allow_copy=allow_copy
)
def nunique(self, axis=0, dropna=True):
"""
Count number of distinct elements in specified axis.
Return Series with number of distinct elements. Can ignore NaN values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'A': [4, 5, 6], 'B': [4, 1, 1]})
>>> df.nunique()
A 3
B 2
dtype: int64
"""
if axis != 0:
raise NotImplementedError("axis parameter is not supported yet.")
return cudf.Series(super().nunique(dropna=dropna))
def _sample_axis_1(
self,
n: int,
weights: Optional[ColumnLike],
replace: bool,
random_state: np.random.RandomState,
ignore_index: bool,
):
if replace:
# Since cuDF does not support multiple columns with the same name,
# sampling with replace=True along axis 1 is unsupported.
raise NotImplementedError(
"Sample is not supported for axis 1/`columns` when"
"`replace=True`."
)
sampled_column_labels = random_state.choice(
self._column_names, size=n, replace=False, p=weights
)
result = self._get_columns_by_label(sampled_column_labels)
if ignore_index:
result.reset_index(drop=True)
return result
def _from_columns_like_self(
self,
columns: List[ColumnBase],
column_names: abc.Iterable[str],
index_names: Optional[List[str]] = None,
*,
override_dtypes: Optional[abc.Iterable[Optional[Dtype]]] = None,
) -> DataFrame:
result = super()._from_columns_like_self(
columns,
column_names,
index_names,
override_dtypes=override_dtypes,
)
result._set_column_names_like(self)
return result
@_cudf_nvtx_annotate
def interleave_columns(self):
"""
Interleave Series columns of a table into a single column.
Converts this column-major DataFrame into a single column in which
the values of its columns are interleaved row by row.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({0: ['A1', 'A2', 'A3'], 1: ['B1', 'B2', 'B3']})
>>> df
0 1
0 A1 B1
1 A2 B2
2 A3 B3
>>> df.interleave_columns()
0 A1
1 B1
2 A2
3 B2
4 A3
5 B3
dtype: object
Returns
-------
The interleaved columns as a single column
"""
if ("category" == self.dtypes).any():
raise ValueError(
"interleave_columns does not support 'category' dtype."
)
return self._constructor_sliced._from_data(
{None: libcudf.reshape.interleave_columns([*self._columns])}
)
@_cudf_nvtx_annotate
def eval(self, expr: str, inplace: bool = False, **kwargs):
"""Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
Not supported.
Returns
-------
DataFrame, Series, or None
Series if a single column is returned (the typical use case),
DataFrame if any assignment statements are included in
``expr``, or None if ``inplace=True``.
Notes
-----
Difference from pandas:
* Additional kwargs are not supported.
* Bitwise and logical operators are not dtype-dependent.
Specifically, `&` must be used for bitwise operators on integers,
not `and`, which is specifically for the logical and between
booleans.
* Only numerical types currently support all operators.
* String types currently support comparison operators.
* Operators generally will not cast automatically. Users are
responsible for casting columns to suitable types before
evaluating a function.
* Multiple assignments to the same name (i.e. a sequence of
assignment statements where later statements are conditioned upon
the output of earlier statements) are not supported.
Examples
--------
>>> df = cudf.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
if kwargs:
raise ValueError(
"Keyword arguments other than `inplace` are not supported"
)
# Have to use a regex match to avoid capturing ==, >=, or <=
equals_sign_regex = "[^=><]=[^=]"
includes_assignment = re.search(equals_sign_regex, expr) is not None
# Check if there were multiple statements. Filter out empty lines.
statements = tuple(filter(None, expr.strip().split("\n")))
if len(statements) > 1 and any(
re.search(equals_sign_regex, st) is None for st in statements
):
raise ValueError(
"Multi-line expressions are only valid if all expressions "
"contain an assignment."
)
if not includes_assignment:
if inplace:
raise ValueError(
"Cannot operate inplace if there is no assignment"
)
return Series._from_data(
{
None: libcudf.transform.compute_column(
[*self._columns], self._column_names, statements[0]
)
}
)
targets = []
exprs = []
for st in statements:
try:
t, e = re.split("[^=]=[^=]", st)
except ValueError as err:
if "too many values" in str(err):
raise ValueError(
f"Statement {st} contains too many assignments ('=')"
)
raise
targets.append(t.strip())
exprs.append(e.strip())
cols = (
libcudf.transform.compute_column(
[*self._columns], self._column_names, e
)
for e in exprs
)
ret = self if inplace else self.copy(deep=False)
for name, col in zip(targets, cols):
ret._data[name] = col
if not inplace:
return ret
def value_counts(
self,
subset=None,
normalize=False,
sort=True,
ascending=False,
dropna=True,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
Parameters
----------
subset: list-like, optional
Columns to use when counting unique combinations.
normalize: bool, default False
Return proportions rather than frequencies.
sort: bool, default True
Sort by frequencies.
ascending: bool, default False
Sort in ascending order.
dropna: bool, default True
Don't include counts of rows that contain NA values.
Returns
-------
Series
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df.value_counts()
num_legs num_wings
4 0 2
2 2 1
6 0 1
dtype: int64
"""
if subset:
diff = set(subset) - set(self._data)
if len(diff) != 0:
raise KeyError(f"columns {diff} do not exist")
columns = list(self._data.names) if subset is None else subset
result = (
self.groupby(
by=columns,
dropna=dropna,
)
.size()
.astype("int64")
)
if sort:
result = result.sort_values(ascending=ascending)
if normalize:
result = result / result._column.sum()
# Pandas always returns MultiIndex even if only one column.
if not isinstance(result.index, MultiIndex):
result.index = MultiIndex._from_data(result._index._data)
return result
def from_dataframe(df, allow_copy=False):
return df_protocol.from_dataframe(df, allow_copy=allow_copy)
def make_binop_func(op, postprocess=None):
# This function is used to wrap binary operations in Frame with an
# appropriate API for DataFrame as required for pandas compatibility. The
# main effect is reordering and error-checking parameters in
# DataFrame-specific ways. The postprocess argument is a callable that may
# optionally be provided to modify the result of the binop if additional
# processing is needed for pandas compatibility. The callable must have the
# signature
# def postprocess(left, right, output)
# where left and right are the inputs to the binop and output is the result
# of calling the wrapped Frame binop.
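# For example (illustrative), ``make_binop_func("add")`` returns a wrapper
# roughly equivalent to:
#
#     def add(self, other, axis="columns", level=None, fill_value=None):
#         return IndexedFrame.add(self, other, axis, level, fill_value)
#
# which is attached to DataFrame further below via ``setattr``.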
wrapped_func = getattr(IndexedFrame, op)
@functools.wraps(wrapped_func)
def wrapper(self, other, axis="columns", level=None, fill_value=None):
if axis not in (1, "columns"):
raise NotImplementedError("Only axis=1 supported at this time.")
output = wrapped_func(self, other, axis, level, fill_value)
if postprocess is None:
return output
return postprocess(self, other, output)
# functools.wraps copies module-level attributes to `wrapper` and sets its
# __wrapped__ attribute to `wrapped_func`. CPython looks up the signature
# string of a function by recursively following __wrapped__ until it hits the
# first function that has the __signature__ attribute set. To make the
# signature string of `wrapper` match its actual parameter list, we directly
# set the __signature__ attribute of `wrapper` below.
new_sig = inspect.signature(
lambda self, other, axis="columns", level=None, fill_value=None: None
)
wrapper.__signature__ = new_sig
return wrapper
# Wrap arithmetic Frame binop functions with the expected API for DataFrame.
for binop in [
"add",
"radd",
"subtract",
"sub",
"rsub",
"multiply",
"mul",
"rmul",
"mod",
"rmod",
"pow",
"rpow",
"floordiv",
"rfloordiv",
"truediv",
"div",
"divide",
"rtruediv",
"rdiv",
]:
setattr(DataFrame, binop, make_binop_func(binop))
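# After this loop, DataFrame exposes the usual arithmetic API, e.g.
# (illustrative) ``df.add(other, fill_value=0)`` or ``df.rtruediv(1)``, each
# dispatching through the wrapper defined above.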
def _make_replacement_func(value):
# This function generates a postprocessing function suitable for use with
# make_binop_func that fills null columns with the desired fill value.
def func(left, right, output):
# This function may be passed as the postprocess argument to
# make_binop_func. Columns that are only present in one of the inputs
# will be null in the output. This function postprocesses the output to
# replace those nulls with the desired fill value.
if isinstance(right, Series):
uncommon_columns = set(left._column_names) ^ set(right.index)
elif isinstance(right, DataFrame):
uncommon_columns = set(left._column_names) ^ set(
right._column_names
)
elif _is_scalar_or_zero_d_array(right):
for name, col in output._data.items():
output._data[name] = col.fillna(value)
return output
else:
return output
for name in uncommon_columns:
output._data[name] = column.full(
size=len(output), fill_value=value, dtype="bool"
)
return output
return func
# The ne comparator needs special postprocessing because elements that are
# missing in one operand should be treated as null and result in True in the
# output rather than simply propagating nulls.
DataFrame.ne = make_binop_func("ne", _make_replacement_func(True))
# All other comparison operators need to return False when one of the
# operands is missing from the input.
for binop in [
"eq",
"lt",
"le",
"gt",
"ge",
]:
setattr(
DataFrame, binop, make_binop_func(binop, _make_replacement_func(False))
)
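# For example (illustrative): comparing a DataFrame with columns {"a", "b"}
# against a Series indexed by {"b", "c"} leaves "a" and "c" present in only
# one operand, so ``df.ne(sr)`` fills those output columns with True while
# ``df.eq(sr)`` and the other comparators fill them with False.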
@_cudf_nvtx_annotate
def from_pandas(obj, nan_as_null=no_default):
"""
Convert certain Pandas objects into the cudf equivalent.
Supports DataFrame, Series, Index, or MultiIndex.
Returns
-------
DataFrame/Series/Index/MultiIndex
Return type depends on the passed input.
Raises
------
TypeError for invalid input type.
Examples
--------
>>> import cudf
>>> import pandas as pd
>>> data = [[0, 1], [1, 2], [3, 4]]
>>> pdf = pd.DataFrame(data, columns=['a', 'b'], dtype=int)
>>> pdf
a b
0 0 1
1 1 2
2 3 4
>>> gdf = cudf.from_pandas(pdf)
>>> gdf
a b
0 0 1
1 1 2
2 3 4
>>> type(gdf)
<class 'cudf.core.dataframe.DataFrame'>
>>> type(pdf)
<class 'pandas.core.frame.DataFrame'>
Converting a Pandas Series to cuDF Series:
>>> psr = pd.Series(['a', 'b', 'c', 'd'], name='apple', dtype='str')
>>> psr
0 a
1 b
2 c
3 d
Name: apple, dtype: object
>>> gsr = cudf.from_pandas(psr)
>>> gsr
0 a
1 b
2 c
3 d
Name: apple, dtype: object
>>> type(gsr)
<class 'cudf.core.series.Series'>
>>> type(psr)
<class 'pandas.core.series.Series'>
Converting a Pandas Index to cuDF Index:
>>> pidx = pd.Index([1, 2, 10, 20])
>>> pidx
Int64Index([1, 2, 10, 20], dtype='int64')
>>> gidx = cudf.from_pandas(pidx)
>>> gidx
Int64Index([1, 2, 10, 20], dtype='int64')
>>> type(gidx)
<class 'cudf.core.index.Int64Index'>
>>> type(pidx)
<class 'pandas.core.indexes.numeric.Int64Index'>
Converting a Pandas MultiIndex to cuDF MultiIndex:
>>> pmidx = pd.MultiIndex(
... levels=[[1, 3, 4, 5], [1, 2, 5]],
... codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]],
... names=["x", "y"],
... )
>>> pmidx
MultiIndex([(1, 1),
(1, 5),
(3, 2),
(4, 2),
(5, 1)],
names=['x', 'y'])
>>> gmidx = cudf.from_pandas(pmidx)
>>> gmidx
MultiIndex([(1, 1),
(1, 5),
(3, 2),
(4, 2),
(5, 1)],
names=['x', 'y'])
>>> type(gmidx)
<class 'cudf.core.multiindex.MultiIndex'>
>>> type(pmidx)
<class 'pandas.core.indexes.multi.MultiIndex'>
"""
if nan_as_null is no_default:
nan_as_null = (
False if cudf.get_option("mode.pandas_compatible") else None
)
if isinstance(obj, pd.DataFrame):
return DataFrame.from_pandas(obj, nan_as_null=nan_as_null)
elif isinstance(obj, pd.Series):
return Series.from_pandas(obj, nan_as_null=nan_as_null)
# This carveout for cudf.pandas is undesirable, but fixes crucial issues
# for core RAPIDS projects like cuML and cuGraph that rely on
# `cudf.from_pandas`, so we allow it for now.
elif (ret := getattr(obj, "_fsproxy_wrapped", None)) is not None:
return ret
elif isinstance(obj, pd.MultiIndex):
return MultiIndex.from_pandas(obj, nan_as_null=nan_as_null)
elif isinstance(obj, pd.RangeIndex):
return cudf.core.index.RangeIndex(
start=obj.start, stop=obj.stop, step=obj.step, name=obj.name
)
elif isinstance(obj, pd.Index):
return cudf.Index.from_pandas(obj, nan_as_null=nan_as_null)
elif isinstance(obj, pd.CategoricalDtype):
return cudf.CategoricalDtype.from_pandas(obj)
else:
raise TypeError(
"from_pandas only accepts Pandas Dataframes, Series, "
"Index, RangeIndex and MultiIndex objects. "
"Got %s" % type(obj)
)
@_cudf_nvtx_annotate
def merge(left, right, *args, **kwargs):
if isinstance(left, Series):
left = left.to_frame()
return left.merge(right, *args, **kwargs)
# A bit of fanciness to inject the `left` parameter into the docstring.
merge_doc = DataFrame.merge.__doc__
if merge_doc is not None:
idx = merge_doc.find("right")
merge.__doc__ = "".join(
[
merge_doc[:idx],
"\n\tleft : Series or DataFrame\n\t",
merge_doc[idx:],
]
)
def _align_indices(lhs, rhs):
"""
Internal util to align the indices of two DataFrames. Returns a tuple of
the aligned dataframes, or the original arguments if the indices are the
same, or if rhs isn't a DataFrame.
"""
lhs_out, rhs_out = lhs, rhs
if isinstance(rhs, DataFrame) and not lhs.index.equals(rhs.index):
df = lhs.merge(
rhs,
sort=True,
how="outer",
left_index=True,
right_index=True,
suffixes=("_x", "_y"),
)
df = df.sort_index()
lhs_out = DataFrame(index=df.index)
rhs_out = DataFrame(index=df.index)
common = set(lhs._column_names) & set(rhs._column_names)
common_x = {f"{x}_x": x for x in common}
common_y = {f"{x}_y": x for x in common}
for col in df._column_names:
if col in common_x:
lhs_out[common_x[col]] = df[col]
elif col in common_y:
rhs_out[common_y[col]] = df[col]
elif col in lhs:
lhs_out[col] = df[col]
elif col in rhs:
rhs_out[col] = df[col]
return lhs_out, rhs_out
def _setitem_with_dataframe(
input_df: DataFrame,
replace_df: DataFrame,
input_cols: Any = None,
mask: Optional[ColumnBase] = None,
ignore_index: bool = False,
):
"""
Set the relevant columns of ``input_df`` with values from ``replace_df``.
:param input_df: DataFrame to be modified in place
:param replace_df: Replacement DataFrame providing the new values
:param input_cols: Columns to replace in the input DataFrame
:param mask: Boolean mask to use for masked replacement
:param ignore_index: If True, skip the index-equality check and do not
    reindex ``replace_df`` to match ``input_df``
"""
if input_cols is None:
input_cols = input_df._column_names
if len(input_cols) != len(replace_df._column_names):
raise ValueError(
"Number of Input Columns must be same replacement Dataframe"
)
if (
not ignore_index
and len(input_df) != 0
and not input_df.index.equals(replace_df.index)
):
replace_df = replace_df.reindex(input_df.index)
for col_1, col_2 in zip(input_cols, replace_df._column_names):
if col_1 in input_df._column_names:
if mask is not None:
input_df._data[col_1][mask] = column.as_column(
replace_df[col_2]
)
else:
input_df._data[col_1] = column.as_column(replace_df[col_2])
else:
if mask is not None:
raise ValueError("Can not insert new column with a bool mask")
else:
# handle append case
input_df._insert(
loc=len(input_df._data),
name=col_1,
value=replace_df[col_2],
)
def extract_col(df, col):
"""
Extract the column named `col` from DataFrame `df`.
If `col` is "index" and `df` has no column named "index",
this returns the index column instead.
"""
try:
return df._data[col]
except KeyError:
if (
col == "index"
and col not in df.index._data
and not isinstance(df.index, MultiIndex)
):
return df.index._data.columns[0]
return df.index._data[col]
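# For example (illustrative): extract_col(df, "a") returns the column "a",
# while extract_col(df, "index") falls back to the index column when the
# frame has no column literally named "index".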
def _get_union_of_indices(indexes):
if len(indexes) == 1:
return indexes[0]
else:
merged_index = cudf.core.index.GenericIndex._concat(indexes)
return merged_index.drop_duplicates()
def _get_union_of_series_names(series_list):
names_list = []
unnamed_count = 0
for series in series_list:
if series.name is None:
names_list.append(f"Unnamed {unnamed_count}")
unnamed_count += 1
else:
names_list.append(series.name)
if unnamed_count == len(series_list):
names_list = range(len(series_list))
return names_list
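# For example (illustrative): series named ["a", None, "b"] yield the names
# ["a", "Unnamed 0", "b"], while a list of three unnamed series yields
# range(3) instead.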
# Create a dictionary of the common, non-null columns
def _get_non_null_cols_and_dtypes(col_idxs, list_of_columns):
# A mapping of {idx: np.dtype}
dtypes = dict()
# A mapping of {idx: [...columns]}, where `[...columns]`
# is a list of columns with at least one valid value for each
# column name across all input frames
non_null_columns = dict()
for idx in col_idxs:
for cols in list_of_columns:
# Skip columns not in this frame
if idx >= len(cols) or cols[idx] is None:
continue
# Store the first dtype we find for a column, even if it's
# all-null. This ensures we always have at least one dtype
# for each name. This dtype will be overwritten later if a
# non-null Column with the same name is found.
if idx not in dtypes:
dtypes[idx] = cols[idx].dtype
if cols[idx].valid_count > 0:
if idx not in non_null_columns:
non_null_columns[idx] = [cols[idx]]
else:
non_null_columns[idx].append(cols[idx])
return non_null_columns, dtypes
def _find_common_dtypes_and_categories(non_null_columns, dtypes):
# A mapping of {idx: categories}, where `categories` is a
# column of all the unique categorical values from each
# categorical column across all input frames
categories = dict()
for idx, cols in non_null_columns.items():
# default to the first non-null dtype
dtypes[idx] = cols[0].dtype
# If all the non-null dtypes are int/float, find a common dtype
if all(is_numeric_dtype(col.dtype) for col in cols):
dtypes[idx] = find_common_type([col.dtype for col in cols])
# If all categorical dtypes, combine the categories
elif all(
isinstance(col, cudf.core.column.CategoricalColumn) for col in cols
):
# Combine and de-dupe the categories
categories[idx] = cudf.Series(
concat_columns([col.categories for col in cols])
)._column.unique()
# Set the column dtype to the codes' dtype. The categories
# will be re-assigned at the end
dtypes[idx] = min_scalar_type(len(categories[idx]))
# Otherwise raise an error if columns have different dtypes
elif not all(is_dtype_equal(c.dtype, dtypes[idx]) for c in cols):
raise ValueError("All columns must be the same type")
return categories
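# For example (illustrative): int32 and float64 columns resolve to a common
# float64 dtype, while two categorical columns with categories {"a", "b"} and
# {"b", "c"} are combined into the category set {"a", "b", "c"}, with their
# codes stored in the smallest integer dtype that can hold three categories.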
def _cast_cols_to_common_dtypes(col_idxs, list_of_columns, dtypes, categories):
# Cast all columns to a common dtype, assign combined categories,
# and back-fill missing columns with all-null columns
for idx in col_idxs:
dtype = dtypes[idx]
for cols in list_of_columns:
# If column not in this df, fill with an all-null column
if idx >= len(cols) or cols[idx] is None:
n = len(next(x for x in cols if x is not None))
cols[idx] = column_empty(row_count=n, dtype=dtype, masked=True)
else:
# If column is categorical, rebase the codes with the
# combined categories, and cast the new codes to the
# min-scalar-sized dtype
if idx in categories:
cols[idx] = (
cols[idx]
._set_categories(
categories[idx],
is_unique=True,
)
.codes
)
cols[idx] = cols[idx].astype(dtype)
def _reassign_categories(categories, cols, col_idxs):
for name, idx in zip(cols, col_idxs):
if idx in categories:
cols[name] = build_categorical_column(
categories=categories[idx],
codes=build_column(
cols[name].base_data, dtype=cols[name].dtype
),
mask=cols[name].base_mask,
offset=cols[name].offset,
size=cols[name].size,
)
def _from_dict_create_index(indexlist, namelist, library):
if len(namelist) > 1:
index = library.MultiIndex.from_tuples(indexlist, names=namelist)
else:
index = library.Index(indexlist, name=namelist[0])
return index
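# For example (illustrative): _from_dict_create_index([(1, "a"), (2, "b")],
# ["x", "y"], pd) builds a two-level pandas MultiIndex, whereas a single name
# builds a flat Index.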