rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_udf_masked_ops.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION.

import math
import operator

import numpy as np
import pytest
from numba import cuda

import cudf
from cudf.core.missing import NA
from cudf.core.udf._ops import (
    arith_ops,
    bitwise_ops,
    comparison_ops,
    unary_ops,
)
from cudf.core.udf.api import Masked
from cudf.core.udf.utils import precompiled
from cudf.testing._utils import (
    _decimal_series,
    assert_eq,
    parametrize_numeric_dtypes_pairwise,
    sv_to_udf_str,
)


@pytest.fixture(scope="module")
def str_udf_data():
    return cudf.DataFrame(
        {
            "str_col": [
                "abc", "ABC", "AbC", "123", "123aBc", "123@.!", "",
                "rapids ai", "gpu", "True", "False", "1.234", ".123a",
                "0.013", "1.0", "01", "20010101", "cudf", "cuda", "gpu",
                "This Is A Title", "This is Not a Title",
                "Neither is This a Title", "NoT a TiTlE", "123 Title Works",
            ]
        }
    )


@pytest.fixture(params=["a", "cu", "2", "gpu", "", " "])
def substr(request):
    return request.param


def run_masked_udf_test(func, data, args=(), **kwargs):
    gdf = data
    pdf = data.to_pandas(nullable=True)

    expect = pdf.apply(func, args=args, axis=1)
    obtain = gdf.apply(func, args=args, axis=1)
    assert_eq(expect, obtain, **kwargs)


def run_masked_string_udf_test(func, data, args=(), **kwargs):
    gdf = data
    pdf = data.to_pandas(nullable=True)

    def row_wrapper(row):
        st = row["str_col"]
        return func(st)

    expect = pdf.apply(row_wrapper, args=args, axis=1)

    func = cuda.jit(device=True)(func)
    obtain = gdf.apply(row_wrapper, args=args, axis=1)
    assert_eq(expect, obtain, **kwargs)

    # Strings that come directly from input columns are backed by
    # MaskedType(string_view) types, but new strings that are returned
    # from functions or operators are backed by MaskedType(udf_string)
    # types. We need to make sure all of our methods work on both kinds
    # of MaskedType, so this wrapper promotes the former to the latter
    # prior to running the input function.
    def udf_string_wrapper(row):
        masked_udf_str = Masked(
            sv_to_udf_str(row["str_col"].value), row["str_col"].valid
        )
        return func(masked_udf_str)

    obtain = gdf.apply(udf_string_wrapper, args=args, axis=1)
    assert_eq(expect, obtain, **kwargs)


def run_masked_udf_series(func, data, args=(), **kwargs):
    gsr = data
    psr = data.to_pandas(nullable=True)

    expect = psr.apply(func, args=args)
    obtain = gsr.apply(func, args=args)
    assert_eq(expect, obtain, **kwargs)


@pytest.mark.parametrize("op", arith_ops)
def test_arith_masked_vs_masked(op):
    # This test exercises all the typing and lowering for arithmetic ops
    # between two columns.
    def func(row):
        x = row["a"]
        y = row["b"]
        return op(x, y)

    gdf = cudf.DataFrame({"a": [1, None, 3, None], "b": [4, 5, None, None]})
    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize("op", bitwise_ops)
def test_bitwise_masked_vs_masked(op):
    # This test exercises all the typing and lowering for bitwise ops
    # between two columns.
    def func(row):
        x = row["a"]
        y = row["b"]
        return op(x, y)

    gdf = cudf.DataFrame(
        {
            "a": [1, 0, 1, 0, 0b1011, 42, None],
            "b": [1, 1, 0, 0, 0b1100, -42, 5],
        }
    )
    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize(
    "dtype_l",
    ["datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]"],
)
@pytest.mark.parametrize(
    "dtype_r",
    [
        "timedelta64[ns]", "timedelta64[us]", "timedelta64[ms]",
        "timedelta64[s]", "datetime64[ns]", "datetime64[ms]",
        "datetime64[us]", "datetime64[s]",
    ],
)
@pytest.mark.parametrize("op", [operator.add, operator.sub])
def test_arith_masked_vs_masked_datelike(op, dtype_l, dtype_r):
    # Datetime version of the above; does not test all dtype combinations
    # for now.
    if "datetime" in dtype_l and "datetime" in dtype_r and op is operator.add:
        # Don't try adding datetimes to datetimes.
        pytest.skip("Adding datetime to datetime is not valid")

    def func(row):
        x = row["a"]
        y = row["b"]
        return op(x, y)

    gdf = cudf.DataFrame(
        {
            "a": ["2011-01-01", cudf.NA, "2011-03-01", cudf.NA],
            "b": [4, 5, cudf.NA, cudf.NA],
        }
    )
    gdf["a"] = gdf["a"].astype(dtype_l)
    gdf["b"] = gdf["b"].astype(dtype_r)
    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize("op", comparison_ops)
def test_compare_masked_vs_masked(op):
    # This test exercises all the typing and lowering for comparisons
    # between columns.
    def func(row):
        x = row["a"]
        y = row["b"]
        return op(x, y)

    # We should get:
    # [?, ?, <NA>, <NA>, <NA>]
    gdf = cudf.DataFrame(
        {"a": [1, 0, None, 1, None], "b": [0, 1, 0, None, None]}
    )
    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize("op", arith_ops)
@pytest.mark.parametrize("constant", [1, 1.5, True, False])
@pytest.mark.parametrize("data", [[1, 2, cudf.NA]])
def test_arith_masked_vs_constant(op, constant, data):
    def func(row):
        x = row["data"]
        return op(x, constant)

    gdf = cudf.DataFrame({"data": data})

    if constant is False and op in {
        operator.mod,
        operator.pow,
        operator.truediv,
        operator.floordiv,
        operator.imod,
        operator.ipow,
        operator.itruediv,
        operator.ifloordiv,
    }:
        # The following test cases yield undefined behavior:
        # - truediv(x, False) because it divides by zero
        # - floordiv(x, False) because it divides by zero
        # - mod(x, False) because it takes a modulus by zero
        # - pow(x, False) because we have an NA in the series and pandas
        #   insists that (NA**0 == 1) where we do not
        pytest.skip()
    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize("op", arith_ops)
@pytest.mark.parametrize("constant", [1, 1.5, True, False])
@pytest.mark.parametrize("data", [[2, 3, cudf.NA], [1, cudf.NA, 1]])
def test_arith_masked_vs_constant_reflected(request, op, constant, data):
    def func(row):
        x = row["data"]
        return op(constant, x)

    # Just a single column -> result will be all NA.
    gdf = cudf.DataFrame({"data": data})

    # cudf differs from pandas for 1**NA.
    request.applymarker(
        pytest.mark.xfail(
            condition=(constant == 1 and op in {operator.pow, operator.ipow}),
            reason="https://github.com/rapidsai/cudf/issues/7478",
        )
    )
    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize("op", arith_ops)
@pytest.mark.parametrize("data", [[1, cudf.NA, 3], [2, 3, cudf.NA]])
def test_arith_masked_vs_null(request, op, data):
    def func(row):
        x = row["data"]
        return op(x, NA)

    gdf = cudf.DataFrame({"data": data})

    # In pandas, 1**NA == 1.
    request.applymarker(
        pytest.mark.xfail(
            condition=(
                (gdf["data"] == 1).any()
                and op in {operator.pow, operator.ipow}
            ),
            reason="https://github.com/rapidsai/cudf/issues/7478",
        )
    )
    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize("op", arith_ops)
def test_arith_masked_vs_null_reflected(op):
    def func(row):
        x = row["data"]
        return op(NA, x)

    gdf = cudf.DataFrame({"data": [1, None, 3]})
    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize("op", unary_ops)
def test_unary_masked(op):
    # This test exercises all the typing and lowering for unary ops.
    def func(row):
        x = row["a"]
        return op(x) if x is not NA else NA

    if "log" in op.__name__:
        gdf = cudf.DataFrame({"a": [0.1, 1.0, None, 3.5, 1e8]})
    elif op.__name__ in {"asin", "acos"}:
        gdf = cudf.DataFrame({"a": [0.0, 0.5, None, 1.0]})
    elif op.__name__ in {"atanh"}:
        gdf = cudf.DataFrame({"a": [0.0, -0.5, None, 0.8]})
    elif op.__name__ in {"acosh", "sqrt", "lgamma"}:
        gdf = cudf.DataFrame({"a": [1.0, 2.0, None, 11.0]})
    elif op.__name__ in {"gamma"}:
        gdf = cudf.DataFrame({"a": [0.1, 2, None, 4]})
    elif op.__name__ in {"invert"}:
        gdf = cudf.DataFrame({"a": [-100, 128, None, 0]}, dtype="int64")
    else:
        gdf = cudf.DataFrame({"a": [-125.60, 395.2, 0.0, None]})
    run_masked_udf_test(func, gdf, check_dtype=False)


def test_masked_is_null_conditional():
    def func(row):
        x = row["a"]
        y = row["b"]
        if x is NA:
            return y
        else:
            return x + y

    gdf = cudf.DataFrame({"a": [1, None, 3, None], "b": [4, 5, None, None]})
    run_masked_udf_test(func, gdf, check_dtype=False)


def test_apply_contains():
    def func(row):
        x = row["a"]
        return x in [1, 2]

    gdf = cudf.DataFrame({"a": [1, 3]})
    run_masked_udf_test(func, gdf, check_dtype=False)


@parametrize_numeric_dtypes_pairwise
@pytest.mark.parametrize("op", [operator.add, operator.and_, operator.eq])
def test_apply_mixed_dtypes(left_dtype, right_dtype, op):
    """
    Test that operations can be performed between columns
    of different dtypes and return a column with the correct
    values and nulls
    """
    # First perform the op on dummy data on the host; if numpy can safely
    # type-cast, we should expect it to work in the UDF too.
    try:
        op(np.dtype(left_dtype).type(0), np.dtype(right_dtype).type(42))
    except TypeError:
        pytest.skip("Operation is unsupported for corresponding dtype.")

    def func(row):
        x = row["a"]
        y = row["b"]
        return op(x, y)

    gdf = cudf.DataFrame({"a": [1.5, None, 3, None], "b": [4, 5, None, None]})
    gdf["a"] = gdf["a"].astype(left_dtype)
    gdf["b"] = gdf["b"].astype(right_dtype)

    run_masked_udf_test(func, gdf, check_dtype=False)


@pytest.mark.parametrize("val", [5, 5.5])
def test_apply_return_literal(val):
    """
    Test unification codepath for scalars and MaskedType;
    makes sure that numba knows how to cast a scalar value
    to a MaskedType
    """

    def func(row):
        x = row["a"]
        y = row["b"]
        if x is not NA and x < 2:
            return val
        else:
            return x + y

    gdf = cudf.DataFrame({"a": [1, None, 3, None], "b": [4, 5, None, None]})
    run_masked_udf_test(func, gdf, check_dtype=False)


def test_apply_return_null():
    """
    Tests casting / unification of Masked and NA
    """

    def func(row):
        x = row["a"]
        if x is NA:
            return NA
        else:
            return x

    gdf = cudf.DataFrame({"a": [1, None, 3]})
    run_masked_udf_test(func, gdf, check_dtype=False)


def test_apply_return_either_null_or_literal():
    def func(row):
        x = row["a"]
        if x > 5:
            return 2
        else:
            return NA

    gdf = cudf.DataFrame({"a": [1, 3, 6]})
    run_masked_udf_test(func, gdf, check_dtype=False)


def test_apply_return_literal_only():
    def func(x):
        return 5

    gdf = cudf.DataFrame({"a": [1, None, 3]})
    run_masked_udf_test(func, gdf, check_dtype=False)


def test_apply_everything():
    def func(row):
        w = row["a"]
        x = row["b"]
        y = row["c"]
        z = row["d"]
        if x is NA:
            return w + y - z
        elif ((z > y) is not NA) and z > y:
            return x
        elif ((x + y) is not NA) and x + y == 0:
            return z / x
        elif x + y is NA:
            return 2.5
        elif w > 100:
            return (
                math.sin(x)
                + math.sqrt(y)
                - (-z)
                + math.lgamma(x) * math.fabs(-0.8) / math.radians(3.14)
            )
        else:
            return y > 2

    gdf = cudf.DataFrame(
        {
            "a": [1, 3, 6, 0, None, 5, None, 101],
            "b": [3.0, 2.5, None, 5.0, 1.0, 5.0, 11.0, 1.0],
            "c": [2, 3, 6, 0, None, 5, None, 6],
            "d": [4, None, 6, 0, None, 5, None, 7.5],
        }
    )
    run_masked_udf_test(func, gdf, check_dtype=False)


###


@pytest.mark.parametrize(
    "data,name",
    [([1, 2, 3], None), ([1, cudf.NA, 3], None), ([1, 2, 3], "test_name")],
)
def test_series_apply_basic(data, name):
    data = cudf.Series(data, name=name)

    def func(x):
        return x + 1

    run_masked_udf_series(func, data, check_dtype=False)


def test_series_apply_null_conditional():
    def func(x):
        if x is NA:
            return 42
        else:
            return x - 1

    data = cudf.Series([1, cudf.NA, 3])
    run_masked_udf_series(func, data)


###


@pytest.mark.parametrize("op", arith_ops)
def test_series_arith_masked_vs_masked(op):
    def func(x):
        return op(x, x)

    data = cudf.Series([1, cudf.NA, 3])
    run_masked_udf_series(func, data, check_dtype=False)


@pytest.mark.parametrize("op", comparison_ops)
def test_series_compare_masked_vs_masked(op):
    """
    In the series case, there is only one other MaskedType to compare
    with: itself
    """

    def func(x):
        return op(x, x)

    data = cudf.Series([1, cudf.NA, 3])
    run_masked_udf_series(func, data, check_dtype=False)


@pytest.mark.parametrize("op", arith_ops)
@pytest.mark.parametrize("constant", [1, 1.5, cudf.NA])
def test_series_arith_masked_vs_constant(request, op, constant):
    def func(x):
        return op(x, constant)

    # Just a single column -> result will be all NA.
    data = cudf.Series([1, 2, cudf.NA])
    # In pandas, 1**NA == 1. In cudf, 1**NA == NA.
    request.applymarker(
        pytest.mark.xfail(
            condition=(
                constant is cudf.NA and op in {operator.pow, operator.ipow}
            ),
            reason="https://github.com/rapidsai/cudf/issues/7478",
        )
    )
    run_masked_udf_series(func, data, check_dtype=False)


@pytest.mark.parametrize("op", arith_ops)
@pytest.mark.parametrize("constant", [1, 1.5, cudf.NA])
def test_series_arith_masked_vs_constant_reflected(request, op, constant):
    def func(x):
        return op(constant, x)

    # Just a single column -> result will be all NA.
    data = cudf.Series([1, 2, cudf.NA])
    # Using `in {1}` since bool(NA == 1) raises a TypeError, as NA is
    # neither truthy nor falsy.
    # In pandas, 1**NA == 1. In cudf, 1**NA == NA.
    request.applymarker(
        pytest.mark.xfail(
            condition=(
                constant in {1} and op in {operator.pow, operator.ipow}
            ),
            reason="https://github.com/rapidsai/cudf/issues/7478",
        )
    )
    run_masked_udf_series(func, data, check_dtype=False)


def test_series_masked_is_null_conditional():
    def func(x):
        if x is NA:
            return 42
        else:
            return x

    data = cudf.Series([1, cudf.NA, 3, cudf.NA])
    run_masked_udf_series(func, data, check_dtype=False)


@pytest.mark.parametrize("op", arith_ops + comparison_ops)
def test_masked_udf_lambda_support(op):
    func = lambda row: op(row["a"], row["b"])  # noqa: E731

    data = cudf.DataFrame(
        {"a": [1, cudf.NA, 3, cudf.NA], "b": [1, 2, cudf.NA, cudf.NA]}
    )

    run_masked_udf_test(func, data, check_dtype=False)


@pytest.mark.parametrize("op", arith_ops + comparison_ops)
def test_masked_udf_nested_function_support(op):
    """
    Nested functions need to be explicitly jitted by the user
    for numba to recognize them. Unfortunately the object
    representing the jitted function cannot itself be used in
    pandas UDFs.
    """

    def inner(x, y):
        return op(x, y)

    def outer(row):
        x = row["a"]
        y = row["b"]
        return inner(x, y)

    gdf = cudf.DataFrame(
        {"a": [1, cudf.NA, 3, cudf.NA], "b": [1, 2, cudf.NA, cudf.NA]}
    )

    with pytest.raises(ValueError):
        gdf.apply(outer, axis=1)

    pdf = gdf.to_pandas(nullable=True)
    inner_gpu = cuda.jit(device=True)(inner)

    def outer_gpu(row):
        x = row["a"]
        y = row["b"]
        return inner_gpu(x, y)

    got = gdf.apply(outer_gpu, axis=1)
    expect = pdf.apply(outer, axis=1)
    assert_eq(expect, got, check_dtype=False)


@pytest.mark.parametrize(
    "data",
    [
        {"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]},
        {"a": [1, 2, 3], "c": [4, 5, 6], "b": [7, 8, 9]},
        {"a": [1, 2, 3], "b": [4, 5, 6], "c": ["a", "b", "c"]},
    ],
)
def test_masked_udf_subset_selection(data):
    def func(row):
        return row["a"] + row["b"]

    data = cudf.DataFrame(data)
    run_masked_udf_test(func, data)


@pytest.mark.parametrize(
    "unsupported_col",
    [
        _decimal_series(
            ["1.0", "2.0", "3.0"], dtype=cudf.Decimal64Dtype(2, 1)
        ),
        cudf.Series([1, 2, 3], dtype="category"),
        cudf.interval_range(start=0, end=3, closed=True),
        [[1, 2], [3, 4], [5, 6]],
        [{"a": 1}, {"a": 2}, {"a": 3}],
    ],
)
def test_masked_udf_unsupported_dtype(unsupported_col):
    data = cudf.DataFrame()
    data["unsupported_col"] = unsupported_col

    def func(row):
        return row["unsupported_col"]

    # Check that we fail when an unsupported type is used within a function.
    with pytest.raises(ValueError):
        data.apply(func, axis=1)

    # Also check that a DataFrame containing unsupported dtypes can still
    # run a function that does NOT involve any of the unsupported columns.
    data["supported_col"] = 1

    def other_func(row):
        return row["supported_col"]

    expect = cudf.Series(np.ones(len(data)))
    got = data.apply(other_func, axis=1)

    assert_eq(expect, got, check_dtype=False)


# Tests for `DataFrame.apply(f, args=(x, y, z))`. Testing the whole space
# of possibilities is intractable; these test the most rudimentary
# guaranteed functionality.
@pytest.mark.parametrize(
    "data",
    [
        {"a": [1, cudf.NA, 3]},
        {"a": [0.5, 2.0, cudf.NA, cudf.NA, 5.0]},
        {"a": [True, False, cudf.NA]},
    ],
)
@pytest.mark.parametrize("op", arith_ops + comparison_ops)
def test_masked_udf_scalar_args_binops(data, op):
    data = cudf.DataFrame(data)

    def func(row, c):
        return op(row["a"], c)

    run_masked_udf_test(func, data, args=(1,), check_dtype=False)


@pytest.mark.parametrize(
    "data",
    [
        {"a": [1, cudf.NA, 3]},
        {"a": [0.5, 2.0, cudf.NA, cudf.NA, 5.0]},
        {"a": [True, False, cudf.NA]},
    ],
)
@pytest.mark.parametrize("op", arith_ops + comparison_ops)
def test_masked_udf_scalar_args_binops_multiple(data, op):
    data = cudf.DataFrame(data)

    def func(row, c, k):
        x = op(row["a"], c)
        y = op(x, k)
        return y

    run_masked_udf_test(func, data, args=(1, 2), check_dtype=False)


@pytest.mark.parametrize(
    "data",
    [
        [1, cudf.NA, 3],
        [0.5, 2.0, cudf.NA, cudf.NA, 5.0],
        [True, False, cudf.NA],
    ],
)
@pytest.mark.parametrize("op", arith_ops + comparison_ops)
def test_mask_udf_scalar_args_binops_series(data, op):
    data = cudf.Series(data)

    def func(x, c):
        return x + c

    run_masked_udf_series(func, data, args=(1,), check_dtype=False)


@pytest.mark.parametrize(
    "data",
    [
        [1, cudf.NA, 3],
        [0.5, 2.0, cudf.NA, cudf.NA, 5.0],
        [True, False, cudf.NA],
    ],
)
@pytest.mark.parametrize("op", arith_ops + comparison_ops)
def test_masked_udf_scalar_args_binops_multiple_series(data, op):
    data = cudf.Series(data)

    def func(data, c, k):
        x = op(data, c)
        y = op(x, k)
        return y

    run_masked_udf_series(func, data, args=(1, 2), check_dtype=False)


def test_masked_udf_caching():
    # Make sure similar functions that differ by simple things like
    # constants actually recompile.
    data = cudf.Series([1, 2, 3])
    expect = data**2
    got = data.apply(lambda x: x**2)
    assert_eq(expect, got, check_dtype=False)

    # Update the constant value being used and make sure it does not
    # result in a cache hit.
    expect = data**3
    got = data.apply(lambda x: x**3)
    assert_eq(expect, got, check_dtype=False)

    # Make sure we get a hit when reapplying.
    def f(x):
        return x + 1

    precompiled.clear()
    assert precompiled.currsize == 0
    data.apply(f)

    assert precompiled.currsize == 1
    data.apply(f)

    assert precompiled.currsize == 1

    # Validate that changing the type of a scalar arg results in a miss.
    precompiled.clear()

    def f(x, c):
        return x + c

    data.apply(f, args=(1,))
    assert precompiled.currsize == 1

    data.apply(f, args=(1.5,))
    assert precompiled.currsize == 2


@pytest.mark.parametrize(
    "data", [[1.0, 0.0, 1.5], [1, 0, 2], [True, False, True]]
)
@pytest.mark.parametrize("operator", [float, int, bool])
def test_masked_udf_casting(operator, data):
    data = cudf.Series(data)

    def func(x):
        return operator(x)

    run_masked_udf_series(func, data, check_dtype=False)


@pytest.mark.parametrize(
    "data",
    [
        np.array(
            [0, 1, -1, 0, np.iinfo("int64").min, np.iinfo("int64").max],
            dtype="int64",
        ),
        np.array([0, 0, 1, np.iinfo("uint64").max], dtype="uint64"),
        np.array(
            [
                0, 0.0, -1.0, 1.5, -1.5,
                np.finfo("float64").min, np.finfo("float64").max,
                np.nan, np.inf, -np.inf,
            ],
            dtype="float64",
        ),
        [False, True, False, cudf.NA],
    ],
)
def test_masked_udf_abs(data):
    data = cudf.Series(data)
    data[0] = cudf.NA

    def func(x):
        return abs(x)

    run_masked_udf_series(func, data, check_dtype=False)


class TestStringUDFs:
    def test_string_udf_len(self, str_udf_data):
        def func(row):
            return len(row["str_col"])

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_startswith(self, str_udf_data, substr):
        def func(row):
            return row["str_col"].startswith(substr)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_endswith(self, str_udf_data, substr):
        def func(row):
            return row["str_col"].endswith(substr)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_find(self, str_udf_data, substr):
        def func(row):
            return row["str_col"].find(substr)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_rfind(self, str_udf_data, substr):
        def func(row):
            return row["str_col"].rfind(substr)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_contains(self, str_udf_data, substr):
        def func(row):
            return substr in row["str_col"]

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    @pytest.mark.parametrize("other", ["cudf", "123", "", " "])
    @pytest.mark.parametrize("cmpop", comparison_ops)
    def test_string_udf_cmpops(self, str_udf_data, other, cmpop):
        def func(row):
            return cmpop(row["str_col"], other)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_isalnum(self, str_udf_data):
        def func(row):
            return row["str_col"].isalnum()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_isalpha(self, str_udf_data):
        def func(row):
            return row["str_col"].isalpha()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_isdigit(self, str_udf_data):
        def func(row):
            return row["str_col"].isdigit()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_isdecimal(self, str_udf_data):
        def func(row):
            return row["str_col"].isdecimal()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_isupper(self, str_udf_data):
        def func(row):
            return row["str_col"].isupper()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_islower(self, str_udf_data):
        def func(row):
            return row["str_col"].islower()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_isspace(self, str_udf_data):
        def func(row):
            return row["str_col"].isspace()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_istitle(self, str_udf_data):
        def func(row):
            return row["str_col"].istitle()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_count(self, str_udf_data, substr):
        def func(row):
            return row["str_col"].count(substr)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_return_string(self, str_udf_data):
        def func(row):
            return row["str_col"]

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    @pytest.mark.parametrize("strip_char", ["1", "a", "12", " ", "", ".", "@"])
    def test_string_udf_strip(self, str_udf_data, strip_char):
        def func(row):
            return row["str_col"].strip(strip_char)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    @pytest.mark.parametrize("strip_char", ["1", "a", "12", " ", "", ".", "@"])
    def test_string_udf_lstrip(self, str_udf_data, strip_char):
        def func(row):
            return row["str_col"].lstrip(strip_char)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    @pytest.mark.parametrize("strip_char", ["1", "a", "12", " ", "", ".", "@"])
    def test_string_udf_rstrip(self, str_udf_data, strip_char):
        def func(row):
            return row["str_col"].rstrip(strip_char)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_upper(self, str_udf_data):
        def func(row):
            return row["str_col"].upper()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    def test_string_udf_lower(self, str_udf_data):
        def func(row):
            return row["str_col"].lower()

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    @pytest.mark.parametrize(
        "concat_char", ["1", "a", "12", " ", "", ".", "@"]
    )
    def test_string_udf_concat(self, str_udf_data, concat_char):
        def func(row):
            return row["str_col"] + concat_char

        run_masked_udf_test(func, str_udf_data, check_dtype=False)

    @pytest.mark.parametrize("to_replace", ["a", "1", "", "@"])
    @pytest.mark.parametrize("replacement", ["a", "1", "", "@"])
    def test_string_udf_replace(self, str_udf_data, to_replace, replacement):
        def func(row):
            return row["str_col"].replace(to_replace, replacement)

        run_masked_udf_test(func, str_udf_data, check_dtype=False)
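
# A minimal standalone sketch (not part of the test file above) of the
# masked-UDF pattern these tests exercise: a row UDF applied with
# DataFrame.apply(axis=1), where missing values surface as cudf.NA and can
# be tested with `is`. Column names here are illustrative only.
#
# import cudf
# from cudf.core.missing import NA
#
# df = cudf.DataFrame({"a": [1, None, 3], "b": [4, 5, None]})
#
# def fill_or_add(row):
#     # mirrors test_masked_is_null_conditional: fall back to "b" when
#     # "a" is missing, otherwise add the two fields
#     if row["a"] is NA:
#         return row["b"]
#     return row["a"] + row["b"]
#
# result = df.apply(fill_or_add, axis=1)  # -> [5, 5, <NA>]; 3 + NA is NA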
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_pack.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pickle
import sys

import numpy as np
import pandas as pd

from cudf import DataFrame, GenericIndex, Series
from cudf._lib.copying import pack, unpack
from cudf.testing._utils import assert_eq


def test_sizeof_packed_dataframe():
    np.random.seed(0)
    df = DataFrame()
    nelem = 1000
    df["keys"] = hkeys = np.arange(nelem, dtype=np.float64)
    df["vals"] = hvals = np.random.random(nelem)
    packed = pack(df)

    nbytes = hkeys.nbytes + hvals.nbytes
    sizeof = sys.getsizeof(packed)
    assert sizeof < nbytes

    serialized_nbytes = len(
        pickle.dumps(packed, protocol=pickle.HIGHEST_PROTOCOL)
    )

    # assert at least sizeof bytes were serialized
    assert serialized_nbytes >= sizeof


def check_packed_equality(df):
    # basic
    assert_packed_frame_equality(df)
    # sliced
    assert_packed_frame_equality(df[:-1])
    assert_packed_frame_equality(df[1:])
    assert_packed_frame_equality(df[2:-2])
    # sorted
    sortvaldf = df.sort_values("vals")
    assert isinstance(sortvaldf.index, GenericIndex)
    assert_packed_frame_equality(sortvaldf)


def assert_packed_frame_equality(df):
    pdf = df.to_pandas()

    packed = pack(df)
    del df
    unpacked = unpack(packed)

    assert_eq(unpacked, pdf)


def test_packed_dataframe_equality_numeric():
    np.random.seed(0)
    df = DataFrame()
    nelem = 10
    df["keys"] = np.arange(nelem, dtype=np.float64)
    df["vals"] = np.random.random(nelem)

    check_packed_equality(df)


def test_packed_dataframe_equality_categorical():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = pd.Categorical(
        ["a", "a", "a", "b", "a", "b", "a", "b", "a", "c"]
    )
    df["vals"] = np.random.random(len(df))

    check_packed_equality(df)


def test_packed_dataframe_equality_list():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = Series(list([i, i + 1, i + 2] for i in range(10)))
    df["vals"] = np.random.random(len(df))

    check_packed_equality(df)


def test_packed_dataframe_equality_struct():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = Series(
        list({"0": i, "1": i + 1, "2": i + 2} for i in range(10))
    )
    df["vals"] = np.random.random(len(df))

    check_packed_equality(df)


def check_packed_unique_pointers(df):
    # basic
    assert_packed_frame_unique_pointers(df)
    # sliced
    assert_packed_frame_unique_pointers(df[:-1])
    assert_packed_frame_unique_pointers(df[1:])
    assert_packed_frame_unique_pointers(df[2:-2])
    # sorted
    sortvaldf = df.sort_values("vals")
    assert isinstance(sortvaldf.index, GenericIndex)
    assert_packed_frame_unique_pointers(sortvaldf)


def assert_packed_frame_unique_pointers(df):
    unpacked = unpack(pack(df))

    for col in df:
        if df._data[col].data:
            assert df._data[col].data.get_ptr(mode="read") != unpacked._data[
                col
            ].data.get_ptr(mode="read")


def test_packed_dataframe_unique_pointers_numeric():
    np.random.seed(0)
    df = DataFrame()
    nelem = 10
    df["keys"] = np.arange(nelem, dtype=np.float64)
    df["vals"] = np.random.random(nelem)

    check_packed_unique_pointers(df)


def test_packed_dataframe_unique_pointers_categorical():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = pd.Categorical(
        ["a", "a", "a", "b", "a", "b", "a", "b", "a", "c"]
    )
    df["vals"] = np.random.random(len(df))

    check_packed_unique_pointers(df)


def test_packed_dataframe_unique_pointers_list():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = Series(list([i, i + 1, i + 2] for i in range(10)))
    df["vals"] = np.random.random(len(df))

    check_packed_unique_pointers(df)


def test_packed_dataframe_unique_pointers_struct():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = Series(
        list({"0": i, "1": i + 1, "2": i + 2} for i in range(10))
    )
    df["vals"] = np.random.random(len(df))

    check_packed_unique_pointers(df)


def check_packed_pickled_equality(df):
    # basic
    assert_packed_frame_picklable(df)
    # sliced
    assert_packed_frame_picklable(df[:-1])
    assert_packed_frame_picklable(df[1:])
    assert_packed_frame_picklable(df[2:-2])
    # sorted
    sortvaldf = df.sort_values("vals")
    assert isinstance(sortvaldf.index, GenericIndex)
    assert_packed_frame_picklable(sortvaldf)
    # out-of-band
    buffers = []
    serialbytes = pickle.dumps(
        pack(df), protocol=5, buffer_callback=buffers.append
    )
    for b in buffers:
        assert isinstance(b, pickle.PickleBuffer)
    loaded = unpack(pickle.loads(serialbytes, buffers=buffers))
    assert_eq(loaded, df)


def assert_packed_frame_picklable(df):
    serialbytes = pickle.dumps(pack(df))
    loaded = unpack(pickle.loads(serialbytes))
    assert_eq(loaded, df)


def test_pickle_packed_dataframe_numeric():
    np.random.seed(0)
    df = DataFrame()
    nelem = 10
    df["keys"] = np.arange(nelem, dtype=np.float64)
    df["vals"] = np.random.random(nelem)

    check_packed_pickled_equality(df)


def test_pickle_packed_dataframe_categorical():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = pd.Categorical(
        ["a", "a", "a", "b", "a", "b", "a", "b", "a", "c"]
    )
    df["vals"] = np.random.random(len(df))

    check_packed_pickled_equality(df)


def test_pickle_packed_dataframe_list():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = Series(list([i, i + 1, i + 2] for i in range(10)))
    df["vals"] = np.random.random(len(df))

    check_packed_pickled_equality(df)


def test_pickle_packed_dataframe_struct():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = Series(
        list({"0": i, "1": i + 1, "2": i + 2} for i in range(10))
    )
    df["vals"] = np.random.random(len(df))

    check_packed_pickled_equality(df)


def check_packed_serialized_equality(df):
    # basic
    assert_packed_frame_serializable(df)
    # sliced
    assert_packed_frame_serializable(df[:-1])
    assert_packed_frame_serializable(df[1:])
    assert_packed_frame_serializable(df[2:-2])
    # sorted
    sortvaldf = df.sort_values("vals")
    assert isinstance(sortvaldf.index, GenericIndex)
    assert_packed_frame_serializable(sortvaldf)


def assert_packed_frame_serializable(df):
    packed = pack(df)
    header, frames = packed.serialize()
    loaded = unpack(packed.deserialize(header, frames))
    assert_eq(loaded, df)


def test_serialize_packed_dataframe_numeric():
    np.random.seed(0)
    df = DataFrame()
    nelem = 10
    df["keys"] = np.arange(nelem, dtype=np.float64)
    df["vals"] = np.random.random(nelem)

    check_packed_serialized_equality(df)


def test_serialize_packed_dataframe_categorical():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = pd.Categorical(
        ["a", "a", "a", "b", "a", "b", "a", "b", "a", "c"]
    )
    df["vals"] = np.random.random(len(df))

    check_packed_serialized_equality(df)


def test_serialize_packed_dataframe_list():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = Series(list([i, i + 1, i + 2] for i in range(10)))
    df["vals"] = np.random.random(len(df))

    check_packed_serialized_equality(df)


def test_serialize_packed_dataframe_struct():
    np.random.seed(0)
    df = DataFrame()
    df["keys"] = Series(
        list({"0": i, "1": i + 1, "2": i + 2} for i in range(10))
    )
    df["vals"] = np.random.random(len(df))

    check_packed_serialized_equality(df)
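
# A minimal round-trip sketch (not part of the test file above) of the
# pack/unpack API these tests target. Note that cudf._lib.copying is a
# private module, so this mirrors the tests rather than a stable public
# contract.
#
# import pickle
#
# import cudf
# from cudf._lib.copying import pack, unpack
#
# df = cudf.DataFrame({"keys": [0.0, 1.0, 2.0], "vals": [0.1, 0.2, 0.3]})
# packed = pack(df)  # contiguous, serializable representation of the frame
# restored = unpack(pickle.loads(pickle.dumps(packed)))  # survives pickling
# assert restored.to_pandas().equals(df.to_pandas())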
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_apply_rows.py
# Copyright (c) 2019-2022, NVIDIA CORPORATION.

import pytest

import cudf
from cudf.core.column import column
from cudf.testing._utils import assert_eq, gen_rand_series


def _kernel_multiply(a, b, out):
    for i, (x, y) in enumerate(zip(a, b)):
        out[i] = x * y


@pytest.mark.parametrize("dtype", ["float32", "float64"])
@pytest.mark.parametrize("has_nulls", [False, True])
@pytest.mark.parametrize("pessimistic", [False, True])
def test_dataframe_apply_rows(dtype, has_nulls, pessimistic):
    count = 1000
    gdf_series_a = gen_rand_series(dtype, count, has_nulls=has_nulls)
    gdf_series_b = gen_rand_series(dtype, count, has_nulls=has_nulls)
    gdf_series_c = gen_rand_series(dtype, count, has_nulls=has_nulls)

    if pessimistic:
        # pessimistically combine the null masks
        gdf_series_expected = gdf_series_a * gdf_series_b
    else:
        # optimistically ignore the null masks
        a = cudf.Series(column.build_column(gdf_series_a.data, dtype))
        b = cudf.Series(column.build_column(gdf_series_b.data, dtype))
        gdf_series_expected = a * b

    df_expected = cudf.DataFrame(
        {
            "a": gdf_series_a,
            "b": gdf_series_b,
            "c": gdf_series_c,
            "out": gdf_series_expected,
        }
    )

    df_original = cudf.DataFrame(
        {"a": gdf_series_a, "b": gdf_series_b, "c": gdf_series_c}
    )

    df_actual = df_original.apply_rows(
        _kernel_multiply,
        ["a", "b"],
        {"out": dtype},
        {},
        pessimistic_nulls=pessimistic,
    )

    assert_eq(df_expected, df_actual)
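
# A minimal sketch (not part of the test file above) of the apply_rows call
# pattern being tested: the kernel iterates the requested input columns and
# writes results into `out`, whose dtype is declared via the outcols
# mapping. Values here are illustrative only.
#
# import cudf
#
# def multiply(a, b, out):
#     for i, (x, y) in enumerate(zip(a, b)):
#         out[i] = x * y
#
# df = cudf.DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
# result = df.apply_rows(
#     multiply,
#     incols=["a", "b"],  # columns forwarded to the kernel
#     outcols={"out": "float64"},  # output column name -> dtype
#     kwargs={},  # extra keyword arguments for the kernel (none here)
# )
# # result["out"] == [4.0, 10.0, 18.0]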
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_doctests.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION.

import contextlib
import doctest
import inspect
import io
import itertools
import os

import numpy as np
import pytest

import cudf

pytestmark = pytest.mark.filterwarnings("ignore::FutureWarning")

# modules that will be searched for doctests
tests = [cudf, cudf.core.groupby]


def _name_in_all(parent, name):
    return name in getattr(parent, "__all__", [])


def _is_public_name(parent, name):
    return not name.startswith("_")


def _find_doctests_in_obj(obj, finder=None, criteria=None):
    """Find all doctests in an object.

    Parameters
    ----------
    obj : module or class
        The object to search for docstring examples.
    finder : doctest.DocTestFinder, optional
        The DocTestFinder object to use. If not provided, a DocTestFinder is
        constructed.
    criteria : callable, optional
        Callable indicating whether to recurse over members of the provided
        object. If not provided, names not defined in the object's
        ``__all__`` property are ignored.

    Yields
    ------
    doctest.DocTest
        The next doctest found in the object.
    """
    if finder is None:
        finder = doctest.DocTestFinder()
    if criteria is None:
        criteria = _name_in_all
    for docstring in finder.find(obj):
        if docstring.examples:
            yield docstring
    for name, member in inspect.getmembers(obj):
        # Only recurse over members matching the criteria
        if not criteria(obj, name):
            continue
        # Recurse over the public API of modules (objects defined in the
        # module's __all__)
        if inspect.ismodule(member):
            yield from _find_doctests_in_obj(
                member, finder, criteria=_name_in_all
            )
        # Recurse over the public API of classes (attributes not prefixed
        # with an underscore)
        if inspect.isclass(member):
            yield from _find_doctests_in_obj(
                member, finder, criteria=_is_public_name
            )


class TestDoctests:
    @pytest.fixture(autouse=True)
    def chdir_to_tmp_path(cls, tmp_path):
        # Some doctests generate files, so this fixture runs the tests in a
        # temporary directory.
        original_directory = os.getcwd()
        os.chdir(tmp_path)
        yield
        os.chdir(original_directory)

    @pytest.mark.parametrize(
        "docstring",
        itertools.chain(*[_find_doctests_in_obj(mod) for mod in tests]),
        ids=lambda docstring: docstring.name,
    )
    def test_docstring(self, docstring):
        # We ignore differences in whitespace in the doctest output, and
        # enable the use of an ellipsis "..." to match any string in the
        # doctest output. An ellipsis is useful for, e.g., memory addresses
        # or imprecise floating point values.
        optionflags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
        runner = doctest.DocTestRunner(optionflags=optionflags)

        # These global names are pre-defined and can be used in doctests
        # without first importing them.
        globals = dict(
            cudf=cudf,
            np=np,
        )
        docstring.globs = globals

        # Capture stdout and include failing outputs in the traceback.
        doctest_stdout = io.StringIO()
        with contextlib.redirect_stdout(doctest_stdout):
            runner.run(docstring)
            results = runner.summarize()
        assert not results.failed, (
            f"{results.failed} of {results.attempted} doctests failed for "
            f"{docstring.name}:\n{doctest_stdout.getvalue()}"
        )
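
# A standalone sketch (not part of the test file above) of the
# discovery/execution flow used there: DocTestFinder extracts docstring
# examples from an object and DocTestRunner executes them. The `statistics`
# module is used here purely for illustration because its docstrings
# contain doctest examples.
#
# import doctest
# import statistics
#
# finder = doctest.DocTestFinder()
# runner = doctest.DocTestRunner(
#     optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
# )
# for test in finder.find(statistics):
#     if test.examples:  # skip docstrings without ">>>" examples
#         runner.run(test)
# print(runner.summarize())  # TestResults(failed=..., attempted=...)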
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_cut.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION.

"""
Test related to Cut
"""

import numpy as np
import pandas as pd
import pytest

from cudf.core.cut import cut
from cudf.testing._utils import assert_eq


@pytest.mark.parametrize(
    "x", [[1, 7, 5, 4, 6, 3], [1, 7], np.array([1, 7, 5, 4, 6, 3])]
)
@pytest.mark.parametrize("bins", [1, 2, 3])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize(
    "ordered", [True]
)  # if ordered is False we need labels
@pytest.mark.parametrize("precision", [1, 2, 3])
def test_cut_basic(x, bins, right, include_lowest, ordered, precision):
    # will test optional labels, retbins and duplicates separately;
    # they need more specific parameters to work
    pcat = pd.cut(
        x=x, bins=bins, right=right, precision=precision,
        include_lowest=include_lowest, ordered=ordered,
    )
    pindex = pd.CategoricalIndex(pcat)
    gindex = cut(
        x=x, bins=bins, right=right, precision=precision,
        include_lowest=include_lowest, ordered=ordered,
    )

    assert_eq(pindex, gindex)


@pytest.mark.parametrize("x", [[1, 7, 5, 4, 6, 3]])
@pytest.mark.parametrize("bins", [3])  # labels must be the same len as bins
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize(
    "ordered", [True, False]
)  # labels must be unique if ordered=True
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize(
    "labels", [["bad", "medium", "good"], ["A", "B", "C"], [1, 2, 3], False]
)
def test_cut_labels(
    x, bins, right, include_lowest, ordered, precision, labels
):
    pcat = pd.cut(
        x=x, bins=bins, right=right, labels=labels, precision=precision,
        include_lowest=include_lowest, ordered=ordered,
    )
    pindex = pd.CategoricalIndex(pcat) if labels else pcat
    gindex = cut(
        x=x, bins=bins, right=right, labels=labels, precision=precision,
        include_lowest=include_lowest, ordered=ordered,
    )

    assert_eq(pindex, gindex)


@pytest.mark.parametrize("x", [[1, 7, 5, 4, 6, 3]])
@pytest.mark.parametrize("bins", [3])  # labels must be the same len as bins
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize(
    "ordered", [False]
)  # labels must be unique if ordered=True
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize(
    "labels", [["bad", "good", "good"], ["B", "A", "B"], [1, 2, 2], False]
)
def test_cut_labels_non_unique(
    x, bins, right, include_lowest, ordered, precision, labels
):
    pcat = pd.cut(
        x=x, bins=bins, right=right, labels=labels, precision=precision,
        include_lowest=include_lowest, ordered=ordered,
    )
    pindex = pd.CategoricalIndex(pcat) if labels else pcat
    gindex = cut(
        x=x, bins=bins, right=right, labels=labels, precision=precision,
        include_lowest=include_lowest, ordered=ordered,
    )

    assert_eq(pindex, gindex)


@pytest.mark.parametrize(
    "x",
    [
        [1, 7, 5, 4, 6, 3],
        [1, 7],
        np.array([1, 7, 5, 4, 6, 3]),
        np.array([2, 4, 6, 8, 10]),
    ],
)
@pytest.mark.parametrize(
    "bins",
    [1, 2, 3, [1, 2, 3], [0, 2, 4, 6, 10]],
)
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("precision", [3])
def test_cut_right(x, bins, right, precision):
    pcat = pd.cut(
        x=x, bins=bins, right=right, precision=precision,
    )
    pindex = pd.CategoricalIndex(pcat)
    gindex = cut(
        x=x, bins=bins, right=right, precision=precision,
    )

    assert_eq(pindex, gindex)


@pytest.mark.parametrize(
    "x",
    [
        [1, 7, 5, 4, 6, 3],
        [1, 7],
        np.array([1, 7, 5, 4, 6, 3]),
        np.array([2, 4, 6, 8, 10]),
    ],
)
@pytest.mark.parametrize(
    "bins",
    [[0, 2, 4, 6, 10, 10], [1, 2, 2, 3, 3]],
)
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize("ordered", [True])
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize("duplicates", ["drop"])
def test_cut_drop_duplicates(
    x, bins, right, precision, duplicates, ordered, include_lowest
):
    pcat = pd.cut(
        x=x, bins=bins, right=right, precision=precision,
        duplicates=duplicates, include_lowest=include_lowest,
        ordered=ordered,
    )
    pindex = pd.CategoricalIndex(pcat)
    gindex = cut(
        x=x, bins=bins, right=right, precision=precision,
        duplicates=duplicates, include_lowest=include_lowest,
        ordered=ordered,
    )

    assert_eq(pindex, gindex)


@pytest.mark.parametrize(
    "x",
    [
        [1, 7, 5, 4, 6, 3],
        [1, 7],
        np.array([1, 7, 5, 4, 6, 3]),
        np.array([2, 4, 6, 8, 10]),
    ],
)
@pytest.mark.parametrize(
    "bins",
    [[0, 2, 4, 6, 10, 10], [1, 2, 2, 3, 3]],
)
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize("ordered", [True])
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize("duplicates", ["raises"])
def test_cut_drop_duplicates_raises(
    x, bins, right, precision, duplicates, ordered, include_lowest
):
    with pytest.raises(ValueError) as excgd:
        cut(
            x=x, bins=bins, right=right, precision=precision,
            duplicates=duplicates, include_lowest=include_lowest,
            ordered=ordered,
        )
    with pytest.raises(ValueError) as excpd:
        pd.cut(
            x=x, bins=bins, right=right, precision=precision,
            duplicates=duplicates, include_lowest=include_lowest,
            ordered=ordered,
        )

    assert_eq(str(excgd.value), str(excpd.value))


@pytest.mark.parametrize(
    "x",
    [
        [0, 0.5, 1.5, 2.5, 4.5],
        [1, 7, 5, 4, 6, 3],
        [1, 7],
        np.array([1, 7, 5, 4, 6, 3]),
        np.array([2, 4, 6, 8, 10]),
    ],
)
@pytest.mark.parametrize(
    "bins",
    [pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])],
)
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize("duplicates", ["drop", "raise"])
def test_cut_intervalindex_bin(x, bins, right, precision, duplicates):
    pcat = pd.cut(
        x=x, bins=bins, right=right, precision=precision,
        duplicates=duplicates,
    )
    pindex = pd.CategoricalIndex(pcat)
    gindex = cut(
        x=x, bins=bins, right=right, precision=precision,
        duplicates=duplicates,
    )

    assert_eq(pindex, gindex)


@pytest.mark.parametrize(
    "x",
    [pd.Series(np.array([2, 4, 6, 8, 10]), index=["a", "b", "c", "d", "e"])],
)
@pytest.mark.parametrize("bins", [1, 2, 3])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize("ordered", [True])
@pytest.mark.parametrize("precision", [3])
def test_cut_series(x, bins, right, include_lowest, ordered, precision):
    pcat = pd.cut(
        x=x, bins=bins, right=right, precision=precision,
        include_lowest=include_lowest, ordered=ordered,
    )
    gcat = cut(
        x=x, bins=bins, right=right, precision=precision,
        include_lowest=include_lowest, ordered=ordered,
    )

    assert_eq(pcat, gcat)
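
# A minimal illustration (not part of the test file above) of the
# pandas/cudf equivalence these tests assert: cut() bins values, and for
# array-like input the cudf result is compared against the pandas result
# wrapped in a CategoricalIndex. The bin edges are illustrative only.
#
# import pandas as pd
#
# from cudf.core.cut import cut
# from cudf.testing._utils import assert_eq
#
# x = [1, 7, 5, 4, 6, 3]
# bins = [0, 3, 6, 10]
#
# pcat = pd.cut(x=x, bins=bins, right=True)  # e.g. 1 -> (0, 3]
# gindex = cut(x=x, bins=bins, right=True)
#
# assert_eq(pd.CategoricalIndex(pcat), gindex)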
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_rolling.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION.

import math
from contextlib import contextmanager

import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.core._compat import PANDAS_GE_150
from cudf.testing._utils import (
    _create_pandas_series_float64_default,
    assert_eq,
)
from cudf.testing.dataset_generator import rand_dataframe


@contextmanager
def _hide_pandas_rolling_min_periods_warning(agg):
    if agg == "count":
        with pytest.warns(
            FutureWarning,
            match="min_periods=None will default to the size of window "
            "consistent with other methods in a future version. Specify "
            "min_periods=0 instead.",
        ):
            yield
    else:
        yield


@pytest.mark.parametrize(
    "data,index",
    [
        ([], []),
        ([1, 1, 1, 1], None),
        ([1, 2, 3, 4], pd.date_range("2001-01-01", "2001-01-04")),
        ([1, 2, 4, 9, 9, 4], ["a", "b", "c", "d", "e", "f"]),
    ],
)
@pytest.mark.parametrize(
    "agg", ["sum", "min", "max", "mean", "count", "std", "var"]
)
@pytest.mark.parametrize("nulls", ["none", "one", "some", "all"])
@pytest.mark.parametrize("center", [True, False])
def test_rolling_series_basic(data, index, agg, nulls, center):
    rng = np.random.default_rng(1)
    if len(data) > 0:
        if nulls == "one":
            p = rng.integers(0, len(data))
            data[p] = np.nan
        elif nulls == "some":
            p1, p2 = rng.integers(0, len(data), (2,))
            data[p1] = np.nan
            data[p2] = np.nan
        elif nulls == "all":
            data = [np.nan] * len(data)

    psr = _create_pandas_series_float64_default(data, index=index)
    gsr = cudf.Series(psr)
    for window_size in range(1, len(data) + 1):
        for min_periods in range(1, window_size + 1):
            expect = getattr(
                psr.rolling(window_size, min_periods, center), agg
            )().fillna(-1)
            got = getattr(
                gsr.rolling(window_size, min_periods, center), agg
            )().fillna(-1)
            assert_eq(expect, got, check_dtype=False, check_freq=False)


@pytest.mark.parametrize(
    "data",
    [
        {"a": [], "b": []},
        {"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]},
        {"a": [1, 2, 4, 9, 9, 4], "b": [1, 2, 4, 9, 9, 4]},
        {
            "a": np.array([1, 2, 4, 9, 9, 4]),
            "b": np.array([1.5, 2.2, 2.2, 8.0, 9.1, 4.2]),
        },
    ],
)
@pytest.mark.parametrize(
    "agg", ["sum", "min", "max", "mean", "count", "std", "var"]
)
@pytest.mark.parametrize("nulls", ["none", "one", "some", "all"])
@pytest.mark.parametrize("center", [True, False])
def test_rolling_dataframe_basic(data, agg, nulls, center):
    rng = np.random.default_rng(0)
    pdf = pd.DataFrame(data)

    if len(pdf) > 0:
        for col_idx in range(len(pdf.columns)):
            if nulls == "one":
                p = rng.integers(0, len(data))
                pdf.iloc[p, col_idx] = np.nan
            elif nulls == "some":
                p1, p2 = rng.integers(0, len(data), (2,))
                pdf.iloc[p1, col_idx] = np.nan
                pdf.iloc[p2, col_idx] = np.nan
            elif nulls == "all":
                pdf.iloc[:, col_idx] = np.nan

    gdf = cudf.from_pandas(pdf)
    for window_size in range(1, len(data) + 1):
        for min_periods in range(1, window_size + 1):
            expect = getattr(
                pdf.rolling(window_size, min_periods, center), agg
            )().fillna(-1)
            got = getattr(
                gdf.rolling(window_size, min_periods, center), agg
            )().fillna(-1)
            assert_eq(expect, got, check_dtype=False)


@pytest.mark.parametrize(
    "agg",
    [
        pytest.param("sum"),
        pytest.param("min"),
        pytest.param("max"),
        pytest.param("mean"),
        pytest.param("count"),
        pytest.param("std"),
        pytest.param("var"),
    ],
)
def test_rolling_with_offset(agg):
    psr = pd.Series(
        [1, 2, 4, 4, np.nan, 9],
        index=[
            pd.Timestamp("20190101 09:00:00"),
            pd.Timestamp("20190101 09:00:01"),
            pd.Timestamp("20190101 09:00:02"),
            pd.Timestamp("20190101 09:00:04"),
            pd.Timestamp("20190101 09:00:07"),
            pd.Timestamp("20190101 09:00:08"),
        ],
    )
    gsr = cudf.from_pandas(psr)
    assert_eq(
        getattr(psr.rolling("2s"), agg)().fillna(-1),
        getattr(gsr.rolling("2s"), agg)().fillna(-1),
        check_dtype=False,
    )


@pytest.mark.parametrize("agg", ["std", "var"])
@pytest.mark.parametrize("ddof", [0, 1])
@pytest.mark.parametrize("center", [True, False])
@pytest.mark.parametrize("seed", [100, 2000])
@pytest.mark.parametrize("window_size", [2, 10, 100])
def test_rolling_var_std_large(agg, ddof, center, seed, window_size):
    iupper_bound = math.sqrt(np.iinfo(np.int64).max / window_size)
    ilower_bound = -math.sqrt(abs(np.iinfo(np.int64).min) / window_size)

    fupper_bound = math.sqrt(np.finfo(np.float64).max / window_size)
    flower_bound = -math.sqrt(abs(np.finfo(np.float64).min) / window_size)

    n_rows = 1_000
    data = rand_dataframe(
        dtypes_meta=[
            {
                "dtype": "int64",
                "null_frequency": 0.4,
                "cardinality": n_rows,
                "min_bound": ilower_bound,
                "max_bound": iupper_bound,
            },
            {
                "dtype": "float64",
                "null_frequency": 0.4,
                "cardinality": n_rows,
                "min_bound": flower_bound,
                "max_bound": fupper_bound,
            },
            {
                "dtype": "decimal64",
                "null_frequency": 0.4,
                "cardinality": n_rows,
                "min_bound": ilower_bound,
                "max_bound": iupper_bound,
            },
        ],
        rows=n_rows,
        use_threads=False,
        seed=seed,
    )
    pdf = data.to_pandas()
    gdf = cudf.from_pandas(pdf)

    expect = getattr(pdf.rolling(window_size, 1, center), agg)(ddof=ddof)
    got = getattr(gdf.rolling(window_size, 1, center), agg)(ddof=ddof)

    import platform

    if platform.machine() == "aarch64":
        # Due to pandas-37051, pandas rolling var/std on uniform window is
        # not reliable. Skipping these rows when comparing.
        for col in expect:
            mask = (got[col].fillna(-1) != 0).to_pandas()
            expect[col] = expect[col][mask]
            got[col] = got[col][mask]
            assert_eq(expect[col], got[col], check_freq=False)
    else:
        assert_eq(expect, got, check_freq=False)


def test_rolling_var_uniform_window():
    """
    Pandas adopts an online variance calculation algorithm. This gives a
    floating point artifact. In cudf, each window is computed independently
    from the previous window; this gives better numeric precision.
    """
    s = pd.Series([1e8, 5, 5, 5])
    expected = s.rolling(3).var()
    got = cudf.from_pandas(s).rolling(3).var()

    assert_eq(expected, got)


def test_rolling_count_with_offset():
    """
    This test covers the xfail case from test_rolling_with_offset["count"].
    It is expected that count should return a non-NaN value, even if
    the counted value is a NaN, unless the min-periods condition
    is not met.
    This behaviour is consistent with counts for rolling-windows,
    in the non-offset window case.
    """
    psr = pd.Series(
        [1, 2, 4, 4, np.nan, 9],
        index=[
            pd.Timestamp("20190101 09:00:00"),
            pd.Timestamp("20190101 09:00:01"),
            pd.Timestamp("20190101 09:00:02"),
            pd.Timestamp("20190101 09:00:04"),
            pd.Timestamp("20190101 09:00:07"),
            pd.Timestamp("20190101 09:00:08"),
        ],
    )
    gsr = cudf.from_pandas(psr)
    assert_eq(
        getattr(gsr.rolling("2s"), "count")().fillna(-1),
        pd.Series(
            [1, 2, 2, 1, 0, 1],
            index=[
                pd.Timestamp("20190101 09:00:00"),
                pd.Timestamp("20190101 09:00:01"),
                pd.Timestamp("20190101 09:00:02"),
                pd.Timestamp("20190101 09:00:04"),
                pd.Timestamp("20190101 09:00:07"),
                pd.Timestamp("20190101 09:00:08"),
            ],
        ),
        check_dtype=False,
    )


def test_rolling_getattr():
    pdf = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
    gdf = cudf.from_pandas(pdf)

    assert_eq(
        pdf.rolling(2).a.sum().fillna(-1),
        gdf.rolling(2).a.sum().fillna(-1),
        check_dtype=False,
    )


def test_rolling_getitem():
    pdf = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
    gdf = cudf.from_pandas(pdf)

    assert_eq(
        pdf.rolling(2)["a"].sum().fillna(-1),
        gdf.rolling(2)["a"].sum().fillna(-1),
        check_dtype=False,
    )
    assert_eq(
        pdf.rolling(2)["a", "b"].sum().fillna(-1),
        gdf.rolling(2)["a", "b"].sum().fillna(-1),
        check_dtype=False,
    )
    assert_eq(
        pdf.rolling(2)[["a", "b"]].sum().fillna(-1),
        gdf.rolling(2)["a", "b"].sum().fillna(-1),
        check_dtype=False,
    )


def test_rolling_getitem_window():
    index = pd.DatetimeIndex(
        pd.date_range("2000-01-01", "2000-01-02", freq="1h")
    )
    pdf = pd.DataFrame({"x": np.arange(len(index))}, index=index)
    gdf = cudf.from_pandas(pdf)

    assert_eq(
        pdf.rolling("2h").x.mean(),
        gdf.rolling("2h").x.mean(),
        check_freq=False,
    )


@pytest.mark.parametrize(
    "data,index", [([1.2, 4.5, 5.9, 2.4, 9.3, 7.1], None), ([], [])]
)
@pytest.mark.parametrize("center", [True, False])
def test_rollling_series_numba_udf_basic(data, index, center):
    psr = _create_pandas_series_float64_default(data, index=index)
    gsr = cudf.from_pandas(psr)

    def some_func(A):
        b = 0
        for a in A:
            b = max(b, math.sqrt(a))
        return b

    for window_size in range(1, len(data) + 1):
        for min_periods in range(1, window_size + 1):
            assert_eq(
                psr.rolling(window_size, min_periods, center)
                .apply(some_func)
                .fillna(-1),
                gsr.rolling(window_size, min_periods, center)
                .apply(some_func)
                .fillna(-1),
                check_dtype=False,
            )


@pytest.mark.parametrize(
    "data",
    [
        {"a": [], "b": []},
        {"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]},
        {"a": [1, 2, 4, 9, 9, 4], "b": [1, 2, 4, 9, 9, 4]},
        {
            "a": np.array([1, 2, 4, 9, 9, 4]),
            "b": np.array([1.5, 2.2, 2.2, 8.0, 9.1, 4.2]),
        },
    ],
)
@pytest.mark.parametrize("center", [True, False])
def test_rolling_dataframe_numba_udf_basic(data, center):
    pdf = pd.DataFrame(data)
    gdf = cudf.from_pandas(pdf)

    def some_func(A):
        b = 0
        for a in A:
            b = b + a**2
        return b / len(A)

    for window_size in range(1, len(data) + 1):
        for min_periods in range(1, window_size + 1):
            assert_eq(
                pdf.rolling(window_size, min_periods, center)
                .apply(some_func)
                .fillna(-1),
                gdf.rolling(window_size, min_periods, center)
                .apply(some_func)
                .fillna(-1),
                check_dtype=False,
            )


def test_rolling_numba_udf_with_offset():
    psr = pd.Series(
        [1, 2, 4, 4, 8, 9],
        index=[
            pd.Timestamp("20190101 09:00:00"),
            pd.Timestamp("20190101 09:00:01"),
            pd.Timestamp("20190101 09:00:02"),
            pd.Timestamp("20190101 09:00:04"),
            pd.Timestamp("20190101 09:00:07"),
            pd.Timestamp("20190101 09:00:08"),
        ],
    )
    gsr = cudf.from_pandas(psr)

    def some_func(A):
        b = 0
        for a in A:
            b = b + a
        return b / len(A)

    assert_eq(
        psr.rolling("2s").apply(some_func).fillna(-1),
        gsr.rolling("2s").apply(some_func).fillna(-1),
        check_dtype=False,
    )


@pytest.mark.parametrize(
    "agg", ["sum", "min", "max", "mean", "count", "var", "std"]
)
def test_rolling_groupby_simple(agg):
    pdf = pd.DataFrame(
        {
            "a": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
            "b": [1, 2, 3, 1, 2, 3, 1, 2, 3, 1],
        }
    )
    gdf = cudf.from_pandas(pdf)

    for window_size in range(1, len(pdf) + 1):
        with _hide_pandas_rolling_min_periods_warning(agg):
            expect = getattr(
                pdf.groupby("a").rolling(window_size), agg
            )().fillna(-1)
        got = getattr(gdf.groupby("a").rolling(window_size), agg)().fillna(-1)
        assert_eq(expect, got, check_dtype=False)

    pdf = pd.DataFrame(
        {"a": [1, 1, 1, 2, 2], "b": [1, 1, 2, 2, 3], "c": [1, 2, 3, 4, 5]}
    )
    gdf = cudf.from_pandas(pdf)

    for window_size in range(1, len(pdf) + 1):
        with _hide_pandas_rolling_min_periods_warning(agg):
            expect = getattr(
                pdf.groupby("a").rolling(window_size), agg
            )().fillna(-1)
        got = getattr(gdf.groupby("a").rolling(window_size), agg)().fillna(-1)
        assert_eq(expect, got, check_dtype=False)


@pytest.mark.parametrize(
    "agg", ["sum", "min", "max", "mean", "count", "var", "std"]
)
def test_rolling_groupby_multi(agg):
    pdf = pd.DataFrame(
        {
            "a": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2],
            "b": [0, 0, 1, 1, 0, 1, 2, 1, 1, 0],
            "c": [1, 2, 3, 1, 2, 3, 1, 2, 3, 1],
        }
    )
    gdf = cudf.from_pandas(pdf)

    for window_size in range(1, len(pdf) + 1):
        with _hide_pandas_rolling_min_periods_warning(agg):
            expect = getattr(
                pdf.groupby(["a", "b"], sort=True).rolling(window_size), agg
            )().fillna(-1)
        got = getattr(
            gdf.groupby(["a", "b"], sort=True).rolling(window_size), agg
        )().fillna(-1)
        assert_eq(expect, got, check_dtype=False)


@pytest.mark.parametrize(
    "agg", ["sum", "min", "max", "mean", "count", "var", "std"]
)
@pytest.mark.parametrize(
    "window_size", ["1d", "2d", "3d", "4d", "5d", "6d", "7d"]
)
def test_rolling_groupby_offset(agg, window_size):
    pdf = pd.DataFrame(
        {
            "date": pd.date_range(start="2016-01-01", periods=7, freq="D"),
            "group": [1, 2, 2, 1, 1, 2, 1],
            "val": [5, 6, 7, 8, 1, 2, 3],
        }
    ).set_index("date")
    gdf = cudf.from_pandas(pdf)
    expect = getattr(pdf.groupby("group").rolling(window_size), agg)().fillna(
        -1
    )
    got = getattr(gdf.groupby("group").rolling(window_size), agg)().fillna(-1)
    assert_eq(expect, got, check_dtype=False)


def test_rolling_custom_index_support():
    from pandas.api.indexers import BaseIndexer

    class CustomIndexer(BaseIndexer):
        def custom_get_window_bounds(
            self, num_values, min_periods, center, closed, step=None
        ):
            start = np.empty(num_values, dtype=np.int64)
            end = np.empty(num_values, dtype=np.int64)

            for i in range(num_values):
                if self.use_expanding[i]:
                    start[i] = 0
                    end[i] = i + 1
                else:
                    start[i] = i
                    end[i] = i + self.window_size

            return start, end

        if PANDAS_GE_150:

            def get_window_bounds(
                self, num_values, min_periods, center, closed, step
            ):
                return self.custom_get_window_bounds(
                    num_values, min_periods, center, closed, step
                )

        else:

            def get_window_bounds(
                self, num_values, min_periods, center, closed
            ):
                return self.custom_get_window_bounds(
                    num_values, min_periods, center, closed
                )

    use_expanding = [True, False, True, False, True]
    indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)

    df = pd.DataFrame({"values": range(5)})
    gdf = cudf.from_pandas(df)

    expected = df.rolling(window=indexer).sum()
    actual = gdf.rolling(window=indexer).sum()

    assert_eq(expected, actual, check_dtype=False)


@pytest.mark.parametrize(
    "indexer",
    [
        pd.api.indexers.FixedForwardWindowIndexer(window_size=2),
        pd.api.indexers.VariableOffsetWindowIndexer(
            index=pd.date_range("2020", periods=5), offset=pd.offsets.BDay(1)
        ),
    ],
)
def test_rolling_indexer_support(indexer):
    df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
    gdf = cudf.from_pandas(df)

    expected = df.rolling(window=indexer, min_periods=2).sum()
    actual = gdf.rolling(window=indexer, min_periods=2).sum()

    assert_eq(expected, actual)


def test_rolling_series():
    df = cudf.DataFrame({"a": range(0, 100), "b": [10, 20, 30, 40, 50] * 20})
    pdf = df.to_pandas()

    expected = pdf.groupby("b")["a"].rolling(5).mean()
    actual = df.groupby("b")["a"].rolling(5).mean()

    assert_eq(expected, actual)
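
# A minimal illustration (not part of the test file above) of the
# rolling-window equivalence these tests check: the same fixed window and
# min_periods produce matching results in pandas and cudf. Values here are
# illustrative only.
#
# import pandas as pd
#
# import cudf
# from cudf.testing._utils import assert_eq
#
# psr = pd.Series([1.0, 2.0, 4.0, 9.0, 9.0, 4.0])
# gsr = cudf.from_pandas(psr)
#
# # window of 3, requiring at least 1 observation per window
# expect = psr.rolling(window=3, min_periods=1).sum()
# got = gsr.rolling(window=3, min_periods=1).sum()
# assert_eq(expect, got)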
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_api_types.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

import numpy as np
import pandas as pd
import pytest
from pandas.api import types as pd_types

import cudf
from cudf.api import types


@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False), (int(), False), (float(), False),
        (complex(), False), ("", False), (object(), False),
        # Base Python types.
        (bool, False), (int, False), (float, False),
        (complex, False), (str, False), (object, False),
        # NumPy types.
        (np.bool_, False), (np.int_, False), (np.float64, False),
        (np.complex128, False), (np.str_, False), (np.unicode_, False),
        (np.datetime64, False), (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False), (np.int_(), False), (np.float64(), False),
        (np.complex128(), False), (np.str_(), False), (np.unicode_(), False),
        (np.datetime64(), False), (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False), (np.dtype("int"), False),
        (np.dtype("float"), False), (np.dtype("complex"), False),
        (np.dtype("str"), False), (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False), (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), False),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.CategoricalDtype.type, True), (pd.CategoricalDtype, True),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), False),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), True),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, True), (cudf.ListDtype, False),
        (cudf.StructDtype, False), (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False), (cudf.Decimal32Dtype, False),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), True), (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), False),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), True),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        # TODO: Currently creating an empty Series of list type ignores the
        # provided type and instead makes a float64 Series.
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        # TODO: Currently creating an empty Series of struct type fails
        # because it uses a numpy utility that doesn't understand
        # StructDtype.
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_categorical_dtype(obj, expect):
    assert types.is_categorical_dtype(obj) == expect


@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False), (int(), False), (float(), False),
        (complex(), False), ("", False), (object(), False),
        # Base Python types.
        (bool, True), (int, True), (float, True),
        (complex, True), (str, False), (object, False),
        # NumPy types.
        (np.bool_, True), (np.int_, True), (np.float64, True),
        (np.complex128, True), (np.str_, False), (np.unicode_, False),
        (np.datetime64, False), (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), True), (np.int_(), True), (np.float64(), True),
        (np.complex128(), True), (np.str_(), False), (np.unicode_(), False),
        (np.datetime64(), False), (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), True), (np.dtype("int"), True),
        (np.dtype("float"), True), (np.dtype("complex"), True),
        (np.dtype("str"), False), (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False), (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), True),
        (np.array([], dtype=np.int_), True),
        (np.array([], dtype=np.float64), True),
        (np.array([], dtype=np.complex128), True),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.CategoricalDtype.type, False), (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), True),
        (pd.Series(dtype="int"), True),
        (pd.Series(dtype="float"), True),
        (pd.Series(dtype="complex"), True),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False), (cudf.ListDtype, False),
        (cudf.StructDtype, False), (cudf.Decimal128Dtype, True),
        (cudf.Decimal64Dtype, True), (cudf.Decimal32Dtype, True),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False), (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), True),
        (cudf.Decimal64Dtype(5, 2), True),
        (cudf.Decimal32Dtype(5, 2), True),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), True),
        (cudf.Series(dtype="int"), True),
        (cudf.Series(dtype="float"), True),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_numeric_dtype(obj, expect):
    assert types.is_numeric_dtype(obj) == expect


@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False), (int(), False), (float(), False),
        (complex(), False), ("", False), (object(), False),
        # Base Python types.
        (bool, False), (int, True), (float, False),
        (complex, False), (str, False), (object, False),
        # NumPy types.
        (np.bool_, False), (np.int_, True), (np.float64, False),
        (np.complex128, False), (np.str_, False), (np.unicode_, False),
        (np.datetime64, False), (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False), (np.int_(), True), (np.float64(), False),
        (np.complex128(), False), (np.str_(), False), (np.unicode_(), False),
        (np.datetime64(), False), (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False), (np.dtype("int"), True),
        (np.dtype("float"), False), (np.dtype("complex"), False),
        (np.dtype("str"), False), (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False), (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
        (np.array([], dtype=np.bool_), False),
        (np.array([], dtype=np.int_), True),
        (np.array([], dtype=np.float64), False),
        (np.array([], dtype=np.complex128), False),
        (np.array([], dtype=np.str_), False),
        (np.array([], dtype=np.unicode_), False),
        (np.array([], dtype=np.datetime64), False),
        (np.array([], dtype=np.timedelta64), False),
        (np.array([], dtype=object), False),
        # Pandas dtypes.
        (pd.CategoricalDtype.type, False), (pd.CategoricalDtype, False),
        # Pandas objects.
        (pd.Series(dtype="bool"), False),
        (pd.Series(dtype="int"), True),
        (pd.Series(dtype="float"), False),
        (pd.Series(dtype="complex"), False),
        (pd.Series(dtype="str"), False),
        (pd.Series(dtype="unicode"), False),
        (pd.Series(dtype="datetime64[s]"), False),
        (pd.Series(dtype="timedelta64[s]"), False),
        (pd.Series(dtype="category"), False),
        (pd.Series(dtype="object"), False),
        # cuDF dtypes.
        (cudf.CategoricalDtype, False), (cudf.ListDtype, False),
        (cudf.StructDtype, False), (cudf.Decimal128Dtype, False),
        (cudf.Decimal64Dtype, False), (cudf.Decimal32Dtype, False),
        (cudf.IntervalDtype, False),
        # cuDF dtype instances.
        (cudf.CategoricalDtype("a"), False), (cudf.ListDtype(int), False),
        (cudf.StructDtype({"a": int}), False),
        (cudf.Decimal128Dtype(5, 2), False),
        (cudf.Decimal64Dtype(5, 2), False),
        (cudf.Decimal32Dtype(5, 2), False),
        (cudf.IntervalDtype(int), False),
        # cuDF objects
        (cudf.Series(dtype="bool"), False),
        (cudf.Series(dtype="int"), True),
        (cudf.Series(dtype="float"), False),
        (cudf.Series(dtype="str"), False),
        (cudf.Series(dtype="datetime64[s]"), False),
        (cudf.Series(dtype="timedelta64[s]"), False),
        (cudf.Series(dtype="category"), False),
        (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
        (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
        (cudf.Series([[1, 2], [3, 4, 5]]), False),
        (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
        (cudf.Series(dtype=cudf.IntervalDtype(int)), False),
    ),
)
def test_is_integer_dtype(obj, expect):
    assert types.is_integer_dtype(obj) == expect


@pytest.mark.parametrize(
    "obj, expect",
    (
        # Base Python objects.
        (bool(), False), (int(), True), (float(), False),
        (complex(), False), ("", False), (object(), False),
        # Base Python types.
        (bool, False), (int, False), (float, False),
        (complex, False), (str, False), (object, False),
        # NumPy types.
        (np.bool_, False), (np.int_, False), (np.float64, False),
        (np.complex128, False), (np.str_, False), (np.unicode_, False),
        (np.datetime64, False), (np.timedelta64, False),
        # NumPy scalars.
        (np.bool_(), False), (np.int_(), True), (np.float64(), False),
        (np.complex128(), False), (np.str_(), False), (np.unicode_(), False),
        (np.datetime64(), False), (np.timedelta64(), False),
        # NumPy dtype objects.
        (np.dtype("bool"), False), (np.dtype("int"), False),
        (np.dtype("float"), False), (np.dtype("complex"), False),
        (np.dtype("str"), False), (np.dtype("unicode"), False),
        (np.dtype("datetime64"), False), (np.dtype("timedelta64"), False),
        (np.dtype("object"), False),
        # NumPy arrays.
(np.array([], dtype=np.bool_), False), (np.array([], dtype=np.int_), False), (np.array([], dtype=np.float64), False), (np.array([], dtype=np.complex128), False), (np.array([], dtype=np.str_), False), (np.array([], dtype=np.unicode_), False), (np.array([], dtype=np.datetime64), False), (np.array([], dtype=np.timedelta64), False), (np.array([], dtype=object), False), # Pandas dtypes. (pd.CategoricalDtype.type, False), (pd.CategoricalDtype, False), # Pandas objects. (pd.Series(dtype="bool"), False), (pd.Series(dtype="int"), False), (pd.Series(dtype="float"), False), (pd.Series(dtype="complex"), False), (pd.Series(dtype="str"), False), (pd.Series(dtype="unicode"), False), (pd.Series(dtype="datetime64[s]"), False), (pd.Series(dtype="timedelta64[s]"), False), (pd.Series(dtype="category"), False), (pd.Series(dtype="object"), False), # cuDF dtypes. (cudf.CategoricalDtype, False), (cudf.ListDtype, False), (cudf.StructDtype, False), (cudf.Decimal128Dtype, False), (cudf.Decimal64Dtype, False), (cudf.Decimal32Dtype, False), (cudf.IntervalDtype, False), # cuDF dtype instances. (cudf.CategoricalDtype("a"), False), (cudf.ListDtype(int), False), (cudf.StructDtype({"a": int}), False), (cudf.Decimal128Dtype(5, 2), False), (cudf.Decimal64Dtype(5, 2), False), (cudf.Decimal32Dtype(5, 2), False), (cudf.IntervalDtype(int), False), # cuDF objects (cudf.Series(dtype="bool"), False), (cudf.Series(dtype="int"), False), (cudf.Series(dtype="float"), False), (cudf.Series(dtype="str"), False), (cudf.Series(dtype="datetime64[s]"), False), (cudf.Series(dtype="timedelta64[s]"), False), (cudf.Series(dtype="category"), False), (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False), (cudf.Series([[1, 2], [3, 4, 5]]), False), (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False), (cudf.Series(dtype=cudf.IntervalDtype(int)), False), ), ) def test_is_integer(obj, expect): assert types.is_integer(obj) == expect # TODO: Temporarily ignoring all cases of "object" until we decide what to do. @pytest.mark.parametrize( "obj, expect", ( # Base Python objects. (bool(), False), (int(), False), (float(), False), (complex(), False), ("", False), (object(), False), # Base Python types. (bool, False), (int, False), (float, False), (complex, False), (str, True), # (object, False), # NumPy types. (np.bool_, False), (np.int_, False), (np.float64, False), (np.complex128, False), (np.str_, True), (np.unicode_, True), (np.datetime64, False), (np.timedelta64, False), # NumPy scalars. (np.bool_(), False), (np.int_(), False), (np.float64(), False), (np.complex128(), False), (np.str_(), True), (np.unicode_(), True), (np.datetime64(), False), (np.timedelta64(), False), # NumPy dtype objects. (np.dtype("bool"), False), (np.dtype("int"), False), (np.dtype("float"), False), (np.dtype("complex"), False), (np.dtype("str"), True), (np.dtype("unicode"), True), (np.dtype("datetime64"), False), (np.dtype("timedelta64"), False), # (np.dtype("object"), False), # NumPy arrays. (np.array([], dtype=np.bool_), False), (np.array([], dtype=np.int_), False), (np.array([], dtype=np.float64), False), (np.array([], dtype=np.complex128), False), (np.array([], dtype=np.str_), True), (np.array([], dtype=np.unicode_), True), (np.array([], dtype=np.datetime64), False), (np.array([], dtype=np.timedelta64), False), # (np.array([], dtype=object), False), # Pandas dtypes. (pd.CategoricalDtype.type, False), (pd.CategoricalDtype, False), # Pandas objects. 
(pd.Series(dtype="bool"), False), (pd.Series(dtype="int"), False), (pd.Series(dtype="float"), False), (pd.Series(dtype="complex"), False), (pd.Series(dtype="str"), True), (pd.Series(dtype="unicode"), True), (pd.Series(dtype="datetime64[s]"), False), (pd.Series(dtype="timedelta64[s]"), False), (pd.Series(dtype="category"), False), # (pd.Series(dtype="object"), False), # cuDF dtypes. (cudf.CategoricalDtype, False), (cudf.ListDtype, False), (cudf.StructDtype, False), (cudf.Decimal128Dtype, False), (cudf.Decimal64Dtype, False), (cudf.Decimal32Dtype, False), (cudf.IntervalDtype, False), # cuDF dtype instances. (cudf.CategoricalDtype("a"), False), (cudf.ListDtype(int), False), (cudf.StructDtype({"a": int}), False), (cudf.Decimal128Dtype(5, 2), False), (cudf.Decimal64Dtype(5, 2), False), (cudf.Decimal32Dtype(5, 2), False), (cudf.IntervalDtype(int), False), # cuDF objects (cudf.Series(dtype="bool"), False), (cudf.Series(dtype="int"), False), (cudf.Series(dtype="float"), False), (cudf.Series(dtype="str"), True), (cudf.Series(dtype="datetime64[s]"), False), (cudf.Series(dtype="timedelta64[s]"), False), (cudf.Series(dtype="category"), False), (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False), (cudf.Series([[1, 2], [3, 4, 5]]), False), (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False), (cudf.Series(dtype=cudf.IntervalDtype(int)), False), ), ) def test_is_string_dtype(obj, expect): assert types.is_string_dtype(obj) == expect @pytest.mark.parametrize( "obj, expect", ( # Base Python objects. (bool(), False), (int(), False), (float(), False), (complex(), False), ("", False), (object(), False), # Base Python types. (bool, False), (int, False), (float, False), (complex, False), (str, False), (object, False), # NumPy types. (np.bool_, False), (np.int_, False), (np.float64, False), (np.complex128, False), (np.str_, False), (np.unicode_, False), (np.datetime64, True), (np.timedelta64, False), # NumPy scalars. (np.bool_(), False), (np.int_(), False), (np.float64(), False), (np.complex128(), False), (np.str_(), False), (np.unicode_(), False), (np.datetime64(), True), (np.timedelta64(), False), # NumPy dtype objects. (np.dtype("bool"), False), (np.dtype("int"), False), (np.dtype("float"), False), (np.dtype("complex"), False), (np.dtype("str"), False), (np.dtype("unicode"), False), (np.dtype("datetime64"), True), (np.dtype("timedelta64"), False), (np.dtype("object"), False), # NumPy arrays. (np.array([], dtype=np.bool_), False), (np.array([], dtype=np.int_), False), (np.array([], dtype=np.float64), False), (np.array([], dtype=np.complex128), False), (np.array([], dtype=np.str_), False), (np.array([], dtype=np.unicode_), False), (np.array([], dtype=np.datetime64), True), (np.array([], dtype=np.timedelta64), False), (np.array([], dtype=object), False), # Pandas dtypes. (pd.CategoricalDtype.type, False), (pd.CategoricalDtype, False), # Pandas objects. (pd.Series(dtype="bool"), False), (pd.Series(dtype="int"), False), (pd.Series(dtype="float"), False), (pd.Series(dtype="complex"), False), (pd.Series(dtype="str"), False), (pd.Series(dtype="unicode"), False), (pd.Series(dtype="datetime64[s]"), True), (pd.Series(dtype="timedelta64[s]"), False), (pd.Series(dtype="category"), False), (pd.Series(dtype="object"), False), # cuDF dtypes. 
(cudf.CategoricalDtype, False), (cudf.ListDtype, False), (cudf.StructDtype, False), (cudf.Decimal128Dtype, False), (cudf.Decimal64Dtype, False), (cudf.Decimal32Dtype, False), (cudf.IntervalDtype, False), # cuDF dtype instances. (cudf.CategoricalDtype("a"), False), (cudf.ListDtype(int), False), (cudf.StructDtype({"a": int}), False), (cudf.Decimal128Dtype(5, 2), False), (cudf.Decimal64Dtype(5, 2), False), (cudf.Decimal32Dtype(5, 2), False), (cudf.IntervalDtype(int), False), # cuDF objects (cudf.Series(dtype="bool"), False), (cudf.Series(dtype="int"), False), (cudf.Series(dtype="float"), False), (cudf.Series(dtype="str"), False), (cudf.Series(dtype="datetime64[s]"), True), (cudf.Series(dtype="timedelta64[s]"), False), (cudf.Series(dtype="category"), False), (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False), (cudf.Series([[1, 2], [3, 4, 5]]), False), (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False), (cudf.Series(dtype=cudf.IntervalDtype(int)), False), ), ) def test_is_datetime_dtype(obj, expect): assert types.is_datetime_dtype(obj) == expect @pytest.mark.parametrize( "obj, expect", ( # Base Python objects. (bool(), False), (int(), False), (float(), False), (complex(), False), ("", False), (object(), False), # Base Python types. (bool, False), (int, False), (float, False), (complex, False), (str, False), (object, False), # NumPy types. (np.bool_, False), (np.int_, False), (np.float64, False), (np.complex128, False), (np.str_, False), (np.unicode_, False), (np.datetime64, False), (np.timedelta64, False), # NumPy scalars. (np.bool_(), False), (np.int_(), False), (np.float64(), False), (np.complex128(), False), (np.str_(), False), (np.unicode_(), False), (np.datetime64(), False), (np.timedelta64(), False), # NumPy dtype objects. (np.dtype("bool"), False), (np.dtype("int"), False), (np.dtype("float"), False), (np.dtype("complex"), False), (np.dtype("str"), False), (np.dtype("unicode"), False), (np.dtype("datetime64"), False), (np.dtype("timedelta64"), False), (np.dtype("object"), False), # NumPy arrays. (np.array([], dtype=np.bool_), False), (np.array([], dtype=np.int_), False), (np.array([], dtype=np.float64), False), (np.array([], dtype=np.complex128), False), (np.array([], dtype=np.str_), False), (np.array([], dtype=np.unicode_), False), (np.array([], dtype=np.datetime64), False), (np.array([], dtype=np.timedelta64), False), (np.array([], dtype=object), False), # Pandas dtypes. (pd.CategoricalDtype.type, False), (pd.CategoricalDtype, False), # Pandas objects. (pd.Series(dtype="bool"), False), (pd.Series(dtype="int"), False), (pd.Series(dtype="float"), False), (pd.Series(dtype="complex"), False), (pd.Series(dtype="str"), False), (pd.Series(dtype="unicode"), False), (pd.Series(dtype="datetime64[s]"), False), (pd.Series(dtype="timedelta64[s]"), False), (pd.Series(dtype="category"), False), (pd.Series(dtype="object"), False), # cuDF dtypes. (cudf.CategoricalDtype, False), (cudf.ListDtype, True), (cudf.StructDtype, False), (cudf.Decimal128Dtype, False), (cudf.Decimal64Dtype, False), (cudf.Decimal32Dtype, False), (cudf.IntervalDtype, False), # cuDF dtype instances. 
(cudf.CategoricalDtype("a"), False), (cudf.ListDtype(int), True), (cudf.StructDtype({"a": int}), False), (cudf.Decimal128Dtype(5, 2), False), (cudf.Decimal64Dtype(5, 2), False), (cudf.Decimal32Dtype(5, 2), False), (cudf.IntervalDtype(int), False), # cuDF objects (cudf.Series(dtype="bool"), False), (cudf.Series(dtype="int"), False), (cudf.Series(dtype="float"), False), (cudf.Series(dtype="str"), False), (cudf.Series(dtype="datetime64[s]"), False), (cudf.Series(dtype="timedelta64[s]"), False), (cudf.Series(dtype="category"), False), (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False), (cudf.Series([[1, 2], [3, 4, 5]]), True), (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False), (cudf.Series(dtype=cudf.IntervalDtype(int)), False), ), ) def test_is_list_dtype(obj, expect): assert types.is_list_dtype(obj) == expect @pytest.mark.parametrize( "obj, expect", ( # Base Python objects. (bool(), False), (int(), False), (float(), False), (complex(), False), ("", False), (object(), False), # Base Python types. (bool, False), (int, False), (float, False), (complex, False), (str, False), (object, False), # NumPy types. (np.bool_, False), (np.int_, False), (np.float64, False), (np.complex128, False), (np.str_, False), (np.unicode_, False), (np.datetime64, False), (np.timedelta64, False), # NumPy scalars. (np.bool_(), False), (np.int_(), False), (np.float64(), False), (np.complex128(), False), (np.str_(), False), (np.unicode_(), False), (np.datetime64(), False), (np.timedelta64(), False), # NumPy dtype objects. (np.dtype("bool"), False), (np.dtype("int"), False), (np.dtype("float"), False), (np.dtype("complex"), False), (np.dtype("str"), False), (np.dtype("unicode"), False), (np.dtype("datetime64"), False), (np.dtype("timedelta64"), False), (np.dtype("object"), False), # NumPy arrays. (np.array([], dtype=np.bool_), False), (np.array([], dtype=np.int_), False), (np.array([], dtype=np.float64), False), (np.array([], dtype=np.complex128), False), (np.array([], dtype=np.str_), False), (np.array([], dtype=np.unicode_), False), (np.array([], dtype=np.datetime64), False), (np.array([], dtype=np.timedelta64), False), (np.array([], dtype=object), False), # Pandas dtypes. (pd.CategoricalDtype.type, False), (pd.CategoricalDtype, False), # Pandas objects. (pd.Series(dtype="bool"), False), (pd.Series(dtype="int"), False), (pd.Series(dtype="float"), False), (pd.Series(dtype="complex"), False), (pd.Series(dtype="str"), False), (pd.Series(dtype="unicode"), False), (pd.Series(dtype="datetime64[s]"), False), (pd.Series(dtype="timedelta64[s]"), False), (pd.Series(dtype="category"), False), (pd.Series(dtype="object"), False), # cuDF dtypes. (cudf.CategoricalDtype, False), (cudf.ListDtype, False), (cudf.StructDtype, True), (cudf.Decimal128Dtype, False), (cudf.Decimal64Dtype, False), (cudf.Decimal32Dtype, False), # (cudf.IntervalDtype, False), # cuDF dtype instances. 
(cudf.CategoricalDtype("a"), False), (cudf.ListDtype(int), False), (cudf.StructDtype({"a": int}), True), (cudf.Decimal128Dtype(5, 2), False), (cudf.Decimal64Dtype(5, 2), False), (cudf.Decimal32Dtype(5, 2), False), # (cudf.IntervalDtype(int), False), # cuDF objects (cudf.Series(dtype="bool"), False), (cudf.Series(dtype="int"), False), (cudf.Series(dtype="float"), False), (cudf.Series(dtype="str"), False), (cudf.Series(dtype="datetime64[s]"), False), (cudf.Series(dtype="timedelta64[s]"), False), (cudf.Series(dtype="category"), False), (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False), (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False), (cudf.Series([[1, 2], [3, 4, 5]]), False), (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), True), # (cudf.Series(dtype=cudf.IntervalDtype(int)), False), ), ) def test_is_struct_dtype(obj, expect): # TODO: All inputs of interval types are currently disabled due to # inconsistent behavior of is_struct_dtype for interval types that will be # fixed as part of the array refactor. assert types.is_struct_dtype(obj) == expect @pytest.mark.parametrize( "obj, expect", ( # Base Python objects. (bool(), False), (int(), False), (float(), False), (complex(), False), ("", False), (object(), False), # Base Python types. (bool, False), (int, False), (float, False), (complex, False), (str, False), (object, False), # NumPy types. (np.bool_, False), (np.int_, False), (np.float64, False), (np.complex128, False), (np.str_, False), (np.unicode_, False), (np.datetime64, False), (np.timedelta64, False), # NumPy scalars. (np.bool_(), False), (np.int_(), False), (np.float64(), False), (np.complex128(), False), (np.str_(), False), (np.unicode_(), False), (np.datetime64(), False), (np.timedelta64(), False), # NumPy dtype objects. (np.dtype("bool"), False), (np.dtype("int"), False), (np.dtype("float"), False), (np.dtype("complex"), False), (np.dtype("str"), False), (np.dtype("unicode"), False), (np.dtype("datetime64"), False), (np.dtype("timedelta64"), False), (np.dtype("object"), False), # NumPy arrays. (np.array([], dtype=np.bool_), False), (np.array([], dtype=np.int_), False), (np.array([], dtype=np.float64), False), (np.array([], dtype=np.complex128), False), (np.array([], dtype=np.str_), False), (np.array([], dtype=np.unicode_), False), (np.array([], dtype=np.datetime64), False), (np.array([], dtype=np.timedelta64), False), (np.array([], dtype=object), False), # Pandas dtypes. (pd.CategoricalDtype.type, False), (pd.CategoricalDtype, False), # Pandas objects. (pd.Series(dtype="bool"), False), (pd.Series(dtype="int"), False), (pd.Series(dtype="float"), False), (pd.Series(dtype="complex"), False), (pd.Series(dtype="str"), False), (pd.Series(dtype="unicode"), False), (pd.Series(dtype="datetime64[s]"), False), (pd.Series(dtype="timedelta64[s]"), False), (pd.Series(dtype="category"), False), (pd.Series(dtype="object"), False), # cuDF dtypes. (cudf.CategoricalDtype, False), (cudf.ListDtype, False), (cudf.StructDtype, False), (cudf.Decimal128Dtype, True), (cudf.Decimal64Dtype, True), (cudf.Decimal32Dtype, True), (cudf.IntervalDtype, False), # cuDF dtype instances. 
(cudf.CategoricalDtype("a"), False), (cudf.ListDtype(int), False), (cudf.StructDtype({"a": int}), False), (cudf.Decimal128Dtype(5, 2), True), (cudf.Decimal64Dtype(5, 2), True), (cudf.Decimal32Dtype(5, 2), True), (cudf.IntervalDtype(int), False), # cuDF objects (cudf.Series(dtype="bool"), False), (cudf.Series(dtype="int"), False), (cudf.Series(dtype="float"), False), (cudf.Series(dtype="str"), False), (cudf.Series(dtype="datetime64[s]"), False), (cudf.Series(dtype="timedelta64[s]"), False), (cudf.Series(dtype="category"), False), (cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True), (cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True), (cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True), (cudf.Series([[1, 2], [3, 4, 5]]), False), (cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False), (cudf.Series(dtype=cudf.IntervalDtype(int)), False), ), ) def test_is_decimal_dtype(obj, expect): assert types.is_decimal_dtype(obj) == expect @pytest.mark.parametrize( "obj", ( # Base Python objects. bool(), int(), float(), complex(), "", object(), # Base Python types. bool, int, float, complex, str, object, # NumPy types. np.bool_, np.int_, np.float64, np.complex128, np.str_, np.unicode_, np.datetime64, np.timedelta64, # NumPy scalars. np.bool_(), np.int_(), np.float64(), np.complex128(), np.str_(), np.unicode_(), np.datetime64(), np.timedelta64(), # NumPy dtype objects. np.dtype("bool"), np.dtype("int"), np.dtype("float"), np.dtype("complex"), np.dtype("str"), np.dtype("unicode"), np.dtype("datetime64"), np.dtype("timedelta64"), np.dtype("object"), # NumPy arrays. np.array([], dtype=np.bool_), np.array([], dtype=np.int_), np.array([], dtype=np.float64), np.array([], dtype=np.complex128), np.array([], dtype=np.str_), np.array([], dtype=np.unicode_), np.array([], dtype=np.datetime64), np.array([], dtype=np.timedelta64), np.array([], dtype=object), # Pandas dtypes. # TODO: pandas does not consider these to be categoricals. # pd.CategoricalDtype.type, # pd.CategoricalDtype, # Pandas objects. pd.Series(dtype="bool"), pd.Series(dtype="int"), pd.Series(dtype="float"), pd.Series(dtype="complex"), pd.Series(dtype="str"), pd.Series(dtype="unicode"), pd.Series(dtype="datetime64[s]"), pd.Series(dtype="timedelta64[s]"), pd.Series(dtype="category"), pd.Series(dtype="object"), ), ) def test_pandas_agreement(obj): assert types.is_categorical_dtype(obj) == pd_types.is_categorical_dtype( obj ) assert types.is_numeric_dtype(obj) == pd_types.is_numeric_dtype(obj) assert types.is_integer_dtype(obj) == pd_types.is_integer_dtype(obj) assert types.is_integer(obj) == pd_types.is_integer(obj) assert types.is_string_dtype(obj) == pd_types.is_string_dtype(obj) @pytest.mark.parametrize( "obj", ( # Base Python objects. bool(), int(), float(), complex(), "", object(), # Base Python types. bool, int, float, complex, str, object, # NumPy types. np.bool_, np.int_, np.float64, np.complex128, np.str_, np.unicode_, np.datetime64, np.timedelta64, # NumPy scalars. np.bool_(), np.int_(), np.float64(), np.complex128(), np.str_(), np.unicode_(), np.datetime64(), np.timedelta64(), # NumPy dtype objects. np.dtype("bool"), np.dtype("int"), np.dtype("float"), np.dtype("complex"), np.dtype("str"), np.dtype("unicode"), np.dtype("datetime64"), np.dtype("timedelta64"), np.dtype("object"), # NumPy arrays. 
np.array([], dtype=np.bool_), np.array([], dtype=np.int_), np.array([], dtype=np.float64), np.array([], dtype=np.complex128), np.array([], dtype=np.str_), np.array([], dtype=np.unicode_), np.array([], dtype=np.datetime64), np.array([], dtype=np.timedelta64), np.array([], dtype=object), # Pandas dtypes. # TODO: pandas does not consider these to be categoricals. # pd.CategoricalDtype.type, # pd.CategoricalDtype, # Pandas objects. pd.Series(dtype="bool"), pd.Series(dtype="int"), pd.Series(dtype="float"), pd.Series(dtype="complex"), pd.Series(dtype="str"), pd.Series(dtype="unicode"), pd.Series(dtype="datetime64[s]"), pd.Series(dtype="timedelta64[s]"), pd.Series(dtype="category"), pd.Series(dtype="object"), ), ) def test_pandas_agreement_scalar(obj): assert types.is_scalar(obj) == pd_types.is_scalar(obj) # TODO: Add test of interval. # TODO: Add test of Scalar.
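# Illustrative usage sketch (not part of the test file above; the inputs
# are assumed, chosen to match expectations enumerated in the
# parametrizations): the cudf.api.types predicates accept scalars, types,
# dtype objects, arrays, and Series alike.
import numpy as np

import cudf
from cudf.api import types

assert types.is_categorical_dtype(cudf.Series(dtype="category"))
assert types.is_numeric_dtype(np.dtype("float64"))
assert not types.is_numeric_dtype(cudf.CategoricalDtype("a"))
assert types.is_integer(np.int_(5)) and not types.is_integer(5.5)
assert types.is_list_dtype(cudf.Series([[1, 2], [3]]))
assert not types.is_struct_dtype(np.dtype("object"))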
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_unaops.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION. import itertools import operator import re import numpy as np import pandas as pd import pytest import cudf from cudf import Series from cudf.testing import _utils as utils _unaops = [operator.abs, operator.invert, operator.neg, np.ceil, np.floor] @pytest.mark.parametrize("dtype", utils.NUMERIC_TYPES) def test_series_abs(dtype): arr = (np.random.random(1000) * 100).astype(dtype) sr = Series(arr) np.testing.assert_equal(sr.abs().to_numpy(), np.abs(arr)) np.testing.assert_equal(abs(sr).to_numpy(), abs(arr)) @pytest.mark.parametrize("dtype", utils.INTEGER_TYPES) def test_series_invert(dtype): arr = (np.random.random(1000) * 100).astype(dtype) sr = Series(arr) np.testing.assert_equal((~sr).to_numpy(), np.invert(arr)) np.testing.assert_equal((~sr).to_numpy(), ~arr) def test_series_neg(): arr = np.random.random(100) * 100 sr = Series(arr) np.testing.assert_equal((-sr).to_numpy(), -arr) @pytest.mark.parametrize("mth", ["min", "max", "sum", "product"]) def test_series_pandas_methods(mth): np.random.seed(0) arr = (1 + np.random.random(5) * 100).astype(np.int64) sr = Series(arr) psr = pd.Series(arr) np.testing.assert_equal(getattr(sr, mth)(), getattr(psr, mth)()) @pytest.mark.parametrize("mth", ["min", "max", "sum", "product", "quantile"]) def test_series_pandas_methods_empty(mth): arr = np.array([]) sr = Series(arr) psr = pd.Series(arr) np.testing.assert_equal(getattr(sr, mth)(), getattr(psr, mth)()) def generate_valid_scalar_unaop_combos(): results = [] # All ops valid for integer values int_values = [0, 1, -1] int_dtypes = utils.INTEGER_TYPES int_ops = _unaops results += list(itertools.product(int_values, int_dtypes, int_ops)) float_values = [0.0, 1.0, -1.1] float_dtypes = utils.FLOAT_TYPES float_ops = [op for op in _unaops if op is not operator.invert] results += list(itertools.product(float_values, float_dtypes, float_ops)) bool_values = [True, False] bool_dtypes = ["bool"] bool_ops = [op for op in _unaops if op is not operator.neg] results += list(itertools.product(bool_values, bool_dtypes, bool_ops)) return results @pytest.mark.filterwarnings("ignore:overflow encountered in scalar negative") @pytest.mark.parametrize("slr,dtype,op", generate_valid_scalar_unaop_combos()) def test_scalar_unary_operations(slr, dtype, op): slr_host = np.array([slr])[0].astype(cudf.dtype(dtype)) slr_device = cudf.Scalar(slr, dtype=dtype) expect = op(slr_host) got = op(slr_device) assert expect == got.value # f16 for small ints with ceil and float if expect.dtype == np.dtype("float16"): assert got.dtype == np.dtype("float32") else: assert expect.dtype == got.dtype def test_scalar_logical(): T = cudf.Scalar(True) F = cudf.Scalar(False) assert T assert not F assert T and T assert not (T and F) assert not (F and T) assert not (F and F) assert T or T assert T or F assert F or T assert not (F or F) def test_scalar_no_negative_bools(): x = cudf.Scalar(True) with pytest.raises( TypeError, match=re.escape( "Boolean scalars in cuDF do not " "support negation, use logical not" ), ): -x def test_series_bool_neg(): sr = Series([True, False, True, None, False, None, True, True]) psr = sr.to_pandas(nullable=True) utils.assert_eq((-sr).to_pandas(nullable=True), -psr, check_dtype=True)
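# A small sketch (assumed values, not a fixture from the file above) of
# the behavior tested there: unary operators broadcast elementwise over a
# Series, and cudf.Scalar mirrors the host result of the same operator.
import cudf

sr = cudf.Series([1, -2, 3])
assert (-sr).to_numpy().tolist() == [-1, 2, -3]
assert abs(sr).to_numpy().tolist() == [1, 2, 3]
assert (~cudf.Series([0, 1])).to_numpy().tolist() == [-1, -2]

slr = cudf.Scalar(2, dtype="int64")
assert (-slr).value == -2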
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_column_accessor.py
# Copyright (c) 2020, NVIDIA CORPORATION. import pandas as pd import pytest import cudf from cudf.core.column_accessor import ColumnAccessor from cudf.testing._utils import assert_eq simple_test_data = [ {}, {"a": []}, {"a": [1]}, {"a": ["a"]}, {"a": [1, 2, 3], "b": ["a", "b", "c"]}, ] mi_test_data = [ {("a", "b"): [1, 2, 4], ("a", "c"): [2, 3, 4]}, {("a", "b"): [1, 2, 3], ("a", ""): [2, 3, 4]}, {("a", "b"): [1, 2, 4], ("c", "d"): [2, 3, 4]}, {("a", "b"): [1, 2, 3], ("a", "c"): [2, 3, 4], ("b", ""): [4, 5, 6]}, ] def check_ca_equal(lhs, rhs): assert lhs.level_names == rhs.level_names assert lhs.multiindex == rhs.multiindex for l_key, r_key in zip(lhs, rhs): assert l_key == r_key assert_eq(lhs[l_key], rhs[r_key]) @pytest.fixture(params=simple_test_data) def simple_data(request): return request.param @pytest.fixture(params=mi_test_data) def mi_data(request): return request.param @pytest.fixture(params=simple_test_data + mi_test_data) def all_data(request): return request.param def test_to_pandas_simple(simple_data): """ Test that a ColumnAccessor converts to a correct pd.Index """ ca = ColumnAccessor(simple_data) assert_eq(ca.to_pandas_index(), pd.DataFrame(simple_data).columns) def test_to_pandas_multiindex(mi_data): ca = ColumnAccessor(mi_data, multiindex=True) assert_eq(ca.to_pandas_index(), pd.DataFrame(mi_data).columns) def test_to_pandas_multiindex_names(): ca = ColumnAccessor( {("a", "b"): [1, 2, 3], ("c", "d"): [3, 4, 5]}, multiindex=True, level_names=("foo", "bar"), ) assert_eq( ca.to_pandas_index(), pd.MultiIndex.from_tuples( (("a", "b"), ("c", "d")), names=("foo", "bar") ), ) def test_iter(simple_data): """ Test that iterating over the CA yields column names. """ ca = ColumnAccessor(simple_data) for expect_key, got_key in zip(simple_data, ca): assert expect_key == got_key def test_all_columns(simple_data): """ Test that all values of the CA are columns. """ ca = ColumnAccessor(simple_data) for col in ca.values(): assert isinstance(col, cudf.core.column.ColumnBase) def test_column_size_mismatch(): """ Test that constructing a CA from columns of differing sizes throws an error. 
""" with pytest.raises(ValueError): ColumnAccessor({"a": [1], "b": [1, 2]}) def test_select_by_label_simple(): """ Test getting a column by label """ ca = ColumnAccessor({"a": [1, 2, 3], "b": [2, 3, 4]}) check_ca_equal(ca.select_by_label("a"), ColumnAccessor({"a": [1, 2, 3]})) check_ca_equal(ca.select_by_label("b"), ColumnAccessor({"b": [2, 3, 4]})) def test_select_by_label_multiindex(): """ Test getting column(s) by label with MultiIndex """ ca = ColumnAccessor( { ("a", "b", "c"): [1, 2, 3], ("a", "b", "e"): [2, 3, 4], ("b", "x", ""): [4, 5, 6], ("a", "d", "e"): [3, 4, 5], }, multiindex=True, ) expect = ColumnAccessor( {("b", "c"): [1, 2, 3], ("b", "e"): [2, 3, 4], ("d", "e"): [3, 4, 5]}, multiindex=True, ) got = ca.select_by_label("a") check_ca_equal(expect, got) expect = ColumnAccessor({"c": [1, 2, 3], "e": [2, 3, 4]}, multiindex=False) got = ca.select_by_label(("a", "b")) check_ca_equal(expect, got) expect = ColumnAccessor( {("b", "c"): [1, 2, 3], ("b", "e"): [2, 3, 4], ("d", "e"): [3, 4, 5]}, multiindex=True, ) got = ca.select_by_label("a") check_ca_equal(expect, got) expect = ColumnAccessor({"c": [1, 2, 3], "e": [2, 3, 4]}, multiindex=False) got = ca.select_by_label(("a", "b")) check_ca_equal(expect, got) def test_select_by_label_simple_slice(): ca = ColumnAccessor({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]}) expect = ColumnAccessor({"b": [2, 3, 4], "c": [3, 4, 5]}) got = ca.select_by_label(slice("b", "c")) check_ca_equal(expect, got) def test_select_by_label_multiindex_slice(): ca = ColumnAccessor( { ("a", "b", "c"): [1, 2, 3], ("a", "b", "e"): [2, 3, 4], ("a", "d", "e"): [3, 4, 5], ("b", "x", ""): [4, 5, 6], }, multiindex=True, ) # pandas needs columns to be sorted to do slicing with multiindex expect = ca got = ca.select_by_label(slice(None, None)) check_ca_equal(expect, got) expect = ColumnAccessor( { ("a", "b", "e"): [2, 3, 4], ("a", "d", "e"): [3, 4, 5], ("b", "x", ""): [4, 5, 6], }, multiindex=True, ) got = ca.select_by_label(slice(("a", "b", "e"), ("b", "x", ""))) check_ca_equal(expect, got) def test_by_label_list(): ca = ColumnAccessor({"a": [1, 2, 3], "b": [2, 3, 4], "c": [3, 4, 5]}) expect = ColumnAccessor({"b": [2, 3, 4], "c": [3, 4, 5]}) got = ca.select_by_label(["b", "c"]) check_ca_equal(expect, got) def test_select_by_index_simple(): """ Test getting a column by label """ ca = ColumnAccessor({"a": [1, 2, 3], "b": [2, 3, 4]}) check_ca_equal(ca.select_by_index(0), ColumnAccessor({"a": [1, 2, 3]})) check_ca_equal(ca.select_by_index(1), ColumnAccessor({"b": [2, 3, 4]})) check_ca_equal(ca.select_by_index([0, 1]), ca) check_ca_equal(ca.select_by_index(slice(0, None)), ca) def test_select_by_index_multiindex(): """ Test getting column(s) by label with MultiIndex """ ca = ColumnAccessor( { ("a", "b", "c"): [1, 2, 3], ("a", "b", "e"): [2, 3, 4], ("b", "x", ""): [4, 5, 6], ("a", "d", "e"): [3, 4, 5], }, multiindex=True, ) expect = ColumnAccessor( { ("a", "b", "c"): [1, 2, 3], ("a", "b", "e"): [2, 3, 4], ("b", "x", ""): [4, 5, 6], }, multiindex=True, ) got = ca.select_by_index(slice(0, 3)) check_ca_equal(expect, got) expect = ColumnAccessor( { ("a", "b", "c"): [1, 2, 3], ("a", "b", "e"): [2, 3, 4], ("a", "d", "e"): [3, 4, 5], }, multiindex=True, ) got = ca.select_by_index([0, 1, 3]) check_ca_equal(expect, got) def test_select_by_index_empty(): ca = ColumnAccessor( { ("a", "b", "c"): [1, 2, 3], ("a", "b", "e"): [2, 3, 4], ("b", "x", ""): [4, 5, 6], ("a", "d", "e"): [3, 4, 5], }, multiindex=True, ) expect = ColumnAccessor( {}, multiindex=True, level_names=((None, None, None)) 
) got = ca.select_by_index(slice(None, 0)) check_ca_equal(expect, got) got = ca.select_by_index([]) check_ca_equal(expect, got) def test_replace_level_values_RangeIndex(): ca = ColumnAccessor( {("a"): [1, 2, 3], ("b"): [2, 3, 4], ("c"): [3, 4, 5]}, multiindex=False, ) expect = ColumnAccessor( {("f"): [1, 2, 3], ("b"): [2, 3, 4], ("c"): [3, 4, 5]}, multiindex=False, ) got = ca.rename_levels(mapper={"a": "f"}, level=0) check_ca_equal(expect, got) def test_replace_level_values_MultiColumn(): ca = ColumnAccessor( {("a", 1): [1, 2, 3], ("a", 2): [2, 3, 4], ("b", 1): [3, 4, 5]}, multiindex=True, ) expect = ColumnAccessor( {("f", 1): [1, 2, 3], ("f", 2): [2, 3, 4], ("b", 1): [3, 4, 5]}, multiindex=True, ) got = ca.rename_levels(mapper={"a": "f"}, level=0) check_ca_equal(expect, got)
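# A brief sketch (hypothetical columns) of the ColumnAccessor API the
# tests above cover: selection by label or by position returns a new
# accessor, and rename_levels remaps labels at the requested level while
# preserving column order.
from cudf.core.column_accessor import ColumnAccessor

ca = ColumnAccessor({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
assert list(ca.select_by_label(["a", "c"])) == ["a", "c"]
assert list(ca.select_by_index(0)) == ["a"]
assert list(ca.rename_levels(mapper={"a": "x"}, level=0)) == ["x", "b", "c"]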
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_hdf.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import os from string import ascii_letters import numpy as np import pandas as pd import pytest import cudf from cudf.testing._utils import DATETIME_TYPES, NUMERIC_TYPES, assert_eq try: import tables # noqa F401 except ImportError: pytest.skip( "PyTables is not installed and is required for HDF reading/writing", allow_module_level=True, ) @pytest.fixture(params=[0, 1, 10, 100]) def pdf(request): types = NUMERIC_TYPES + DATETIME_TYPES + ["bool"] typer = {"col_" + val: val for val in types} ncols = len(types) nrows = request.param # Create a pandas dataframe with random data of mixed types test_pdf = pd.DataFrame( [list(range(ncols * i, ncols * (i + 1))) for i in range(nrows)], columns=pd.Index([f"col_{typ}" for typ in types], name="foo"), ) # Delete the name of the column index, and rename the row index test_pdf.columns.name = None test_pdf.index.name = "test_index" # Cast all the column dtypes to objects, rename them, and then cast to # appropriate types test_pdf = ( test_pdf.astype("object") .astype(typer) .rename({"col_datetime64[ms]": "col_datetime64"}, axis=1) ) # Create non-numeric categorical data otherwise may be typecasted data = [ascii_letters[np.random.randint(0, 52)] for i in range(nrows)] test_pdf["col_category"] = pd.Series(data, dtype="category") return (test_pdf, nrows) @pytest.fixture def gdf(pdf): pdf, nrows = pdf return (cudf.DataFrame.from_pandas(pdf), nrows) @pytest.fixture(params=["fixed", "table"]) def hdf_files(request, tmp_path_factory, pdf): pdf, nrows = pdf if request.param == "fixed": pdf = pdf.drop("col_category", axis=1) fname_df = tmp_path_factory.mktemp("hdf") / "test_df.hdf" pdf.to_hdf(fname_df, "hdf_df_tests", format=request.param) fname_series = {} for column in pdf.columns: fname_series[column] = ( tmp_path_factory.mktemp("hdf") / "test_series.hdf" ) pdf[column].to_hdf( fname_series[column], "hdf_series_tests", format=request.param ) return (fname_df, fname_series, request.param, nrows) @pytest.mark.filterwarnings("ignore:Using CPU") @pytest.mark.filterwarnings("ignore:Strings are not yet supported") @pytest.mark.parametrize( "columns", [["col_int8"], ["col_category"], ["col_int32", "col_float32"], None], ) def test_hdf_reader(hdf_files, columns): hdf_df_file, hdf_series, format, nrows = hdf_files if format == "fixed" and columns is not None: pytest.skip("Can't use columns with format 'fixed'") if format == "table" and nrows == 0: pytest.skip("Can't read 0 row table with format 'table'") expect_df = pd.read_hdf(hdf_df_file, columns=columns) got_df = cudf.read_hdf(hdf_df_file, columns=columns) assert_eq( expect_df, got_df, check_categorical=False, check_index_type=False ) for column in hdf_series.keys(): expect_series = pd.read_hdf(hdf_series[column]) got_series = cudf.read_hdf(hdf_series[column]) assert_eq(expect_series, got_series, check_index_type=False) @pytest.mark.parametrize("format", ["fixed", "table"]) @pytest.mark.parametrize("complib", ["zlib", "bzip2", "lzo", "blosc"]) @pytest.mark.filterwarnings("ignore:Using CPU") def test_hdf_writer(tmpdir, pdf, gdf, complib, format): pdf, nrows = pdf gdf, _ = gdf if format == "fixed": pdf = pdf.drop("col_category", axis=1) gdf = gdf.drop("col_category", axis=1) pdf_df_fname = tmpdir.join("pdf_df.hdf") gdf_df_fname = tmpdir.join("gdf_df.hdf") pdf.to_hdf(pdf_df_fname, "hdf_tests", format=format, complib=complib) gdf.to_hdf(gdf_df_fname, "hdf_tests", format=format, complib=complib) assert os.path.exists(pdf_df_fname) assert os.path.exists(gdf_df_fname) if format 
== "table" and nrows == 0: pytest.skip("Can't read 0 row table with format 'table'") expect = pd.read_hdf(pdf_df_fname) got = pd.read_hdf(gdf_df_fname) assert_eq(expect, got, check_index_type=False) for column in pdf.columns: pdf_series_fname = tmpdir.join(column + "_" + "pdf_series.hdf") gdf_series_fname = tmpdir.join(column + "_" + "gdf_series.hdf") pdf[column].to_hdf( pdf_series_fname, "hdf_tests", format=format, complib=complib ) gdf[column].to_hdf( gdf_series_fname, "hdf_tests", format=format, complib=complib ) assert os.path.exists(pdf_series_fname) assert os.path.exists(gdf_series_fname) expect_series = pd.read_hdf(pdf_series_fname) got_series = pd.read_hdf(gdf_series_fname) assert_eq(expect_series, got_series, check_index_type=False)
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_struct.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf from cudf.core.dtypes import StructDtype from cudf.testing._utils import DATETIME_TYPES, TIMEDELTA_TYPES, assert_eq @pytest.mark.parametrize( "data", [ [{}], [{"a": None}], [{"a": 1}], [{"a": "one"}], [{"a": 1}, {"a": 2}], [{"a": 1, "b": "one"}, {"a": 2, "b": "two"}], [{"b": "two", "a": None}, None, {"a": "one", "b": "two"}], ], ) def test_create_struct_series(data): expect = pd.Series(data) got = cudf.Series(data) assert_eq(expect, got, check_dtype=False) def test_struct_of_struct_copy(): sr = cudf.Series([{"a": {"b": 1}}]) assert_eq(sr, sr.copy()) def test_struct_of_struct_loc(): df = cudf.DataFrame({"col": [{"a": {"b": 1}}]}) expect = cudf.Series([{"a": {"b": 1}}], name="col") assert_eq(expect, df["col"]) @pytest.mark.parametrize( "key, expect", [(0, [1, 3]), (1, [2, 4]), ("a", [1, 3]), ("b", [2, 4])] ) def test_struct_for_field(key, expect): sr = cudf.Series([{"a": 1, "b": 2}, {"a": 3, "b": 4}]) expect = cudf.Series(expect) got = sr.struct.field(key) assert_eq(expect, got) @pytest.mark.parametrize("input_obj", [[{"a": 1, "b": cudf.NA, "c": 3}]]) def test_series_construction_with_nulls(input_obj): expect = pa.array(input_obj, from_pandas=True) got = cudf.Series(input_obj).to_arrow() assert expect == got @pytest.mark.parametrize( "fields", [ {"a": np.dtype(np.int64)}, {"a": np.dtype(np.int64), "b": None}, { "a": cudf.ListDtype(np.dtype(np.int64)), "b": cudf.Decimal64Dtype(1, 0), }, { "a": cudf.ListDtype(cudf.StructDtype({"b": np.dtype(np.int64)})), "b": cudf.ListDtype(cudf.ListDtype(np.dtype(np.int64))), }, ], ) def test_serialize_struct_dtype(fields): dtype = cudf.StructDtype(fields) recreated = dtype.__class__.deserialize(*dtype.serialize()) assert recreated == dtype @pytest.mark.parametrize( "series, expected", [ ( [ {"a": "Hello world", "b": []}, {"a": "CUDF", "b": [1, 2, 3], "c": 1}, {}, ], {"a": "Hello world", "b": [], "c": cudf.NA}, ), ([{}], {}), ( [{"b": True}, {"a": 1, "c": [1, 2, 3], "d": "1", "b": False}], {"a": cudf.NA, "c": cudf.NA, "d": cudf.NA, "b": True}, ), ], ) def test_struct_getitem(series, expected): sr = cudf.Series(series) assert sr[0] == expected @pytest.mark.parametrize( "data, item", [ ( [ {"a": "Hello world", "b": []}, {"a": "CUDF", "b": [1, 2, 3], "c": cudf.NA}, {"a": "abcde", "b": [4, 5, 6], "c": 9}, ], {"a": "Hello world", "b": [], "c": cudf.NA}, ), ( [ {"a": "Hello world", "b": []}, {"a": "CUDF", "b": [1, 2, 3], "c": cudf.NA}, {"a": "abcde", "b": [4, 5, 6], "c": 9}, ], {}, ), ( [ {"a": "Hello world", "b": []}, {"a": "CUDF", "b": [1, 2, 3], "c": cudf.NA}, {"a": "abcde", "b": [4, 5, 6], "c": 9}, ], cudf.NA, ), ( [ {"a": "Hello world", "b": []}, {"a": "CUDF", "b": [1, 2, 3], "c": cudf.NA}, {"a": "abcde", "b": [4, 5, 6], "c": 9}, ], {"a": "Second element", "b": [1, 2], "c": 1000}, ), ], ) def test_struct_setitem(data, item): sr = cudf.Series(data) sr[1] = item data[1] = item expected = cudf.Series(data) assert sr.to_arrow() == expected.to_arrow() @pytest.mark.parametrize( "data", [ {"a": 1, "b": "rapids", "c": [1, 2, 3, 4]}, {"a": "Hello"}, ], ) def test_struct_scalar_host_construction(data): slr = cudf.Scalar(data) assert slr.value == data assert list(slr.device_value.value.values()) == list(data.values()) @pytest.mark.parametrize( ("data", "dtype"), [ ( {"a": 1, "b": "rapids", "c": [1, 2, 3, 4], "d": cudf.NA}, cudf.StructDtype( { "a": np.dtype(np.int64), "b": np.dtype(np.str_), "c": cudf.ListDtype(np.dtype(np.int64)), "d": 
np.dtype(np.int64), } ), ), ( {"b": [], "c": [1, 2, 3]}, cudf.StructDtype( { "b": cudf.ListDtype(np.dtype(np.int64)), "c": cudf.ListDtype(np.dtype(np.int64)), } ), ), ], ) def test_struct_scalar_host_construction_no_dtype_inference(data, dtype): # cudf cannot infer the dtype of the scalar when it contains only nulls or # is empty. slr = cudf.Scalar(data, dtype=dtype) assert slr.value == data assert list(slr.device_value.value.values()) == list(data.values()) def test_struct_scalar_null(): slr = cudf.Scalar(cudf.NA, dtype=StructDtype) assert slr.device_value.value is cudf.NA def test_struct_explode(): s = cudf.Series([], dtype=cudf.StructDtype({})) expect = cudf.DataFrame({}) assert_eq(expect, s.struct.explode()) s = cudf.Series( [ {"a": 1, "b": "x"}, {"a": 2, "b": "y"}, {"a": 3, "b": "z"}, {"a": 4, "b": "a"}, ] ) expect = cudf.DataFrame({"a": [1, 2, 3, 4], "b": ["x", "y", "z", "a"]}) got = s.struct.explode() assert_eq(expect, got) # check that a copy was made: got["a"][0] = 5 assert_eq(s.struct.explode(), expect) def test_dataframe_to_struct(): df = cudf.DataFrame() expect = cudf.Series(dtype=cudf.StructDtype({})) got = df.to_struct() assert_eq(expect, got) df = cudf.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]}) expect = cudf.Series( [{"a": 1, "b": "x"}, {"a": 2, "b": "y"}, {"a": 3, "b": "z"}] ) got = df.to_struct() assert_eq(expect, got) # check that a copy was made: df["a"][0] = 5 assert_eq(got, expect) # check that a non-string (but convertible to string) named column can be # converted to struct df = cudf.DataFrame([[1, 2], [3, 4]], columns=[(1, "b"), 0]) expect = cudf.Series([{"(1, 'b')": 1, "0": 2}, {"(1, 'b')": 3, "0": 4}]) with pytest.warns(UserWarning, match="will be casted"): got = df.to_struct() assert_eq(got, expect) @pytest.mark.parametrize( "series, slce", [ ( [ {"a": "Hello world", "b": []}, {"a": "CUDF", "b": [1, 2, 3], "c": 1}, {}, None, ], slice(1, None), ), ( [ {"a": "Hello world", "b": []}, {"a": "CUDF", "b": [1, 2, 3], "c": 1}, {}, None, {"d": ["Hello", "rapids"]}, None, cudf.NA, ], slice(1, 5), ), ( [ {"a": "Hello world", "b": []}, {"a": "CUDF", "b": [1, 2, 3], "c": 1}, {}, None, {"c": 5}, None, cudf.NA, ], slice(None, 4), ), ([{"a": {"b": 42, "c": -1}}, {"a": {"b": 0, "c": None}}], slice(0, 1)), ], ) def test_struct_slice(series, slce): got = cudf.Series(series)[slce] expected = cudf.Series(series[slce]) assert got.to_arrow() == expected.to_arrow() def test_struct_slice_nested_struct(): data = [ {"a": {"b": 42, "c": "abc"}}, {"a": {"b": 42, "c": "hello world"}}, ] got = cudf.Series(data)[0:1] expect = cudf.Series(data[0:1]) assert got.to_arrow() == expect.to_arrow() @pytest.mark.parametrize( "data", [ [{}], [{"a": None}], [{"a": 1}], [{"a": "one"}], [{"a": 1}, {"a": 2}], [{"a": 1, "b": "one"}, {"a": 2, "b": "two"}], [{"b": "two", "a": None}, None, {"a": "one", "b": "two"}], ], ) def test_struct_field_errors(data): got = cudf.Series(data) with pytest.raises(KeyError): got.struct.field("notWithinFields") with pytest.raises(IndexError): got.struct.field(100) @pytest.mark.parametrize("dtype", DATETIME_TYPES + TIMEDELTA_TYPES) def test_struct_with_datetime_and_timedelta(dtype): df = cudf.DataFrame( { "a": [12, 232, 2334], "datetime": cudf.Series([23432, 3432423, 324324], dtype=dtype), } ) series = df.to_struct() a_array = np.array([12, 232, 2334]) datetime_array = np.array([23432, 3432423, 324324]).astype(dtype) actual = series.to_pandas() values_list = [] for i, val in enumerate(a_array): values_list.append({"a": val, "datetime": datetime_array[i]}) expected = 
pd.Series(values_list) assert_eq(expected, actual) def test_struct_int_values(): series = cudf.Series( [{"a": 1, "b": 2}, {"a": 10, "b": None}, {"a": 5, "b": 6}] ) actual_series = series.to_pandas() assert isinstance(actual_series[0]["b"], int) assert isinstance(actual_series[1]["b"], type(None)) assert isinstance(actual_series[2]["b"], int) def test_nested_struct_from_pandas_empty(): # tests constructing nested structs columns that would result in # libcudf EMPTY type child columns inheriting their parent's null # mask. See GH PR: #10761 pdf = pd.Series([[{"c": {"x": None}}], [{"c": None}]]) gdf = cudf.from_pandas(pdf) assert_eq(pdf, gdf) def _nested_na_replace(struct_scalar): """ Replace `cudf.NA` with `None` in the dict """ for key, value in struct_scalar.items(): if value is cudf.NA: struct_scalar[key] = None return struct_scalar @pytest.mark.parametrize( "data, idx, expected", [ ( [{"f2": {"a": "sf21"}, "f1": "a"}, {"f1": "sf12", "f2": None}], 0, {"f1": "a", "f2": {"a": "sf21"}}, ), ( [ {"f2": {"a": "sf21"}}, {"f1": "sf12", "f2": None}, ], 0, {"f1": cudf.NA, "f2": {"a": "sf21"}}, ), ( [{"a": "123"}, {"a": "sf12", "b": {"a": {"b": "c"}}}], 1, {"a": "sf12", "b": {"a": {"b": "c"}}}, ), ], ) def test_nested_struct_extract_host_scalars(data, idx, expected): series = cudf.Series(data) assert _nested_na_replace(series[idx]) == _nested_na_replace(expected) def test_struct_memory_usage(): s = cudf.Series([{"a": 1, "b": 10}, {"a": 2, "b": 20}, {"a": 3, "b": 30}]) df = s.struct.explode() assert_eq(s.memory_usage(), df.memory_usage().sum()) def test_struct_with_null_memory_usage(): df = cudf.DataFrame( { "a": cudf.Series([1, 2, -1, -1, 3], dtype="int64"), "b": cudf.Series([10, 20, -1, -1, 30], dtype="int64"), } ) s = df.to_struct() assert s.memory_usage() == 80 s[2:4] = None assert s.memory_usage() == 272 @pytest.mark.parametrize( "indices", [slice(0, 3), slice(1, 4), slice(None, None, 2), slice(1, None, 2)], ids=[":3", "1:4", "0::2", "1::2"], ) @pytest.mark.parametrize( "values", [[None, {}, {}, None], [{}, {}, {}, {}]], ids=["nulls", "no_nulls"], ) def test_struct_empty_children_slice(indices, values): s = cudf.Series(values) actual = s.iloc[indices] expect = cudf.Series(values[indices], index=range(len(values))[indices]) assert_eq(actual, expect) def test_struct_iterate_error(): s = cudf.Series( [{"f2": {"a": "sf21"}, "f1": "a"}, {"f1": "sf12", "f2": None}] ) with pytest.raises(TypeError): iter(s.struct)
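# An illustrative sketch (made-up rows) of the struct accessor covered
# above: .struct.field pulls out one child column by name or position,
# and .struct.explode widens the struct into a DataFrame copy.
import cudf

s = cudf.Series([{"a": 1, "b": "x"}, {"a": 2, "b": "y"}])
assert s.struct.field("a").to_numpy().tolist() == [1, 2]
df = s.struct.explode()
assert list(df.columns) == ["a", "b"]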
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_timedelta.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import datetime import operator import cupy as cp import numpy as np import pandas as pd import pytest import cudf from cudf.testing import _utils as utils from cudf.testing._utils import assert_eq, assert_exceptions_equal _TIMEDELTA_DATA = [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), [1000000, 200000, 3000000], [1000000, 200000, None], [1], [12, 11, 232, 223432411, 2343241, 234324, 23234], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], [ 136457654736252, 134736784364431, 245345345545332, 223432411, 2343241, 3634548734, 23234, ], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ] _TIMEDELTA_DATA_NON_OVERFLOW = [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), [1000000, 200000, 3000000], [1000000, 200000, None], [1], [12, 11, 232, 223432411, 2343241, 234324, 23234], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ] _cmpops = [ operator.lt, operator.gt, operator.le, operator.ge, operator.eq, operator.ne, ] @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], [0.3534, 12, 22, 343, 43.53534, 4353.42], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) def test_timedelta_series_create(data, dtype): if dtype not in ("timedelta64[ns]"): pytest.skip( "Bug in pandas : https://github.com/pandas-dev/pandas/issues/35465" ) psr = pd.Series( cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype ) gsr = cudf.Series(data, dtype=dtype) assert_eq(psr, gsr) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [12, 12, 22, 343, 4353534, 435342], [0.3534, 12, 22, 343, 43.53534, 4353.42], cp.asarray([10, 20, 30, 100]), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize("cast_dtype", ["int64", "category"]) def test_timedelta_from_typecast(data, dtype, cast_dtype): if dtype not in ("timedelta64[ns]"): pytest.skip( "Bug in pandas : https://github.com/pandas-dev/pandas/issues/35465" ) psr = pd.Series( cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype ) gsr = cudf.Series(data, dtype=dtype) if cast_dtype == "int64": assert_eq(psr.values.view(cast_dtype), gsr.astype(cast_dtype).values) else: assert_eq(psr.astype(cast_dtype), gsr.astype(cast_dtype)) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [12, 12, 22, 343, 4353534, 435342], [0.3534, 12, 22, 343, 43.53534, 4353.42], cp.asarray([10, 20, 30, 100]), ], ) @pytest.mark.parametrize("cast_dtype", utils.TIMEDELTA_TYPES) def test_timedelta_to_typecast(data, cast_dtype): psr = pd.Series(cp.asnumpy(data) if isinstance(data, cp.ndarray) else data) gsr = cudf.Series(data) assert_eq(psr.astype(cast_dtype), gsr.astype(cast_dtype)) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], [0.3534, 12, 22, 343, 43.53534, 4353.42], np.array([10, 20, 
30, None, 100]), cp.asarray([10, 20, 30, 100]), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) def test_timedelta_from_pandas(data, dtype): psr = pd.Series( cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype ) gsr = cudf.from_pandas(psr) assert_eq(psr, gsr) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) def test_timedelta_series_to_numpy(data, dtype): gsr = cudf.Series(data, dtype=dtype) expected = np.array( cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype ) expected = expected[~np.isnan(expected)] actual = gsr.dropna().to_numpy() np.testing.assert_array_equal(expected, actual) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) def test_timedelta_series_to_pandas(data, dtype): gsr = cudf.Series(data, dtype=dtype) expected = np.array( cp.asnumpy(data) if isinstance(data, cp.ndarray) else data, dtype=dtype ) expected = pd.Series(expected) actual = gsr.to_pandas() assert_eq(expected, actual) @pytest.mark.parametrize( "data,other", [ ([1000000, 200000, 3000000], [1000000, 200000, 3000000]), ([1000000, 200000, None], [1000000, 200000, None]), ([], []), ([None], [None]), ([None, None, None, None, None], [None, None, None, None, None]), ( [12, 12, 22, 343, 4353534, 435342], [12, 12, 22, 343, 4353534, 435342], ), (np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])), (cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])), ([1000000, 200000, 3000000], [200000, 34543, 3000000]), ([1000000, 200000, None], [1000000, 200000, 3000000]), ([None], [1]), ( [12, 12, 22, 343, 4353534, 435342], [None, 1, 220, 3, 34, 4353423287], ), (np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])), (cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize( "ops", [ "eq", "ne", "lt", "gt", "le", "ge", "add", "radd", "sub", "rsub", "floordiv", "truediv", "mod", ], ) def test_timedelta_ops_misc_inputs(data, other, dtype, ops): gsr = cudf.Series(data, dtype=dtype) other_gsr = cudf.Series(other, dtype=dtype) psr = gsr.to_pandas() other_psr = other_gsr.to_pandas() expected = getattr(psr, ops)(other_psr) actual = getattr(gsr, ops)(other_gsr) if ops in ("eq", "lt", "gt", "le", "ge"): actual = actual.fillna(False) elif ops == "ne": actual = actual.fillna(True) if ops == "floordiv": expected[actual.isna().to_pandas()] = np.nan assert_eq(expected, actual) @pytest.mark.parametrize( "datetime_data,timedelta_data", [ ([1000000, 200000, 3000000], [1000000, 200000, 3000000]), ([1000000, 200000, None], [1000000, 200000, None]), ([], []), ([None], [None]), ([None, None, None, None, None], [None, None, None, None, None]), ( [12, 12, 22, 343, 4353534, 435342], [12, 12, 22, 343, 4353534, 435342], ), (np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])), (cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])), ([1000000, 200000, 3000000], [200000, 34543, 3000000]), ([1000000, 200000, None], [1000000, 200000, 3000000]), ([None], [1]), ( [12, 
12, 22, 343, 4353534, 435342], [None, 1, 220, 3, 34, 4353423287], ), (np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])), (cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])), ( [12, 11, 232, 223432411, 2343241, 234324, 23234], [11, 1132324, 2322323111, 23341, 2434, 332, 323], ), ( [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [11, 1132324, 2322323111, 23341, 2434, 332, 323], ), ( [11, 1132324, 2322323111, 23341, 2434, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ), ( [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ), ], ) @pytest.mark.parametrize("datetime_dtype", utils.DATETIME_TYPES) @pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize( "ops", ["add", "sub"], ) def test_timedelta_ops_datetime_inputs( datetime_data, timedelta_data, datetime_dtype, timedelta_dtype, ops ): gsr_datetime = cudf.Series(datetime_data, dtype=datetime_dtype) gsr_timedelta = cudf.Series(timedelta_data, dtype=timedelta_dtype) psr_datetime = gsr_datetime.to_pandas() psr_timedelta = gsr_timedelta.to_pandas() expected = getattr(psr_datetime, ops)(psr_timedelta) actual = getattr(gsr_datetime, ops)(gsr_timedelta) assert_eq(expected, actual) if ops == "add": expected = getattr(psr_timedelta, ops)(psr_datetime) actual = getattr(gsr_timedelta, ops)(gsr_datetime) assert_eq(expected, actual) elif ops == "sub": assert_exceptions_equal( lfunc=operator.sub, rfunc=operator.sub, lfunc_args_and_kwargs=([psr_timedelta, psr_datetime],), rfunc_args_and_kwargs=([gsr_timedelta, gsr_datetime],), ) @pytest.mark.parametrize( "df", [ pd.DataFrame( { "A": pd.Series(pd.date_range("2012-1-1", periods=3, freq="D")), "B": pd.Series([pd.Timedelta(days=i) for i in range(3)]), } ), pd.DataFrame( { "A": pd.Series( pd.date_range("1994-1-1", periods=50, freq="D") ), "B": pd.Series([pd.Timedelta(days=i) for i in range(50)]), } ), ], ) @pytest.mark.parametrize("op", ["add", "sub"]) def test_timedelta_dataframe_ops(df, op): pdf = df gdf = cudf.from_pandas(pdf) if op == "add": pdf["C"] = pdf["A"] + pdf["B"] gdf["C"] = gdf["A"] + gdf["B"] elif op == "sub": pdf["C"] = pdf["A"] - pdf["B"] gdf["C"] = gdf["A"] - gdf["B"] assert_eq(pdf, gdf) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), [1000000, 200000, 3000000], [1000000, 200000, None], [1], [12, 11, 232, 223432411, 2343241, 234324, 23234], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [1.321, 1132.324, 23223231.11, 233.41, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ], ) @pytest.mark.parametrize( "other_scalars", [ datetime.timedelta(days=768), datetime.timedelta(seconds=768), datetime.timedelta(microseconds=7), datetime.timedelta(minutes=447), datetime.timedelta(hours=447), datetime.timedelta(weeks=734), np.timedelta64(4, "s"), np.timedelta64(456, "D"), np.timedelta64(46, "h"), np.timedelta64("nat"), np.timedelta64(1, "s"), np.timedelta64(1, "ms"), np.timedelta64(1, "us"), np.timedelta64(1, "ns"), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize( "op", [ "add", "sub", "truediv", "mod", "floordiv", ], ) def test_timedelta_series_ops_with_scalars(data, other_scalars, dtype, op): gsr = cudf.Series(data=data, dtype=dtype) psr = gsr.to_pandas() if op == "add": expected = psr + other_scalars actual = 
gsr + other_scalars elif op == "sub": expected = psr - other_scalars actual = gsr - other_scalars elif op == "truediv": expected = psr / other_scalars actual = gsr / other_scalars elif op == "floordiv": expected = psr // other_scalars actual = gsr // other_scalars elif op == "mod": expected = psr % other_scalars actual = gsr % other_scalars assert_eq(expected, actual) if op == "add": expected = other_scalars + psr actual = other_scalars + gsr elif op == "sub": expected = other_scalars - psr actual = other_scalars - gsr elif op == "truediv": expected = other_scalars / psr actual = other_scalars / gsr elif op == "floordiv": expected = other_scalars // psr actual = other_scalars // gsr elif op == "mod": expected = other_scalars % psr actual = other_scalars % gsr assert_eq(expected, actual) @pytest.mark.parametrize( "reverse", [ False, pytest.param( True, marks=pytest.mark.xfail( strict=True, reason=( "timedelta modulo by zero is dubiously defined in " "both pandas and cuDF " "(see https://github.com/rapidsai/cudf/issues/5938)" ), ), ), ], ) def test_timedelta_series_mod_with_scalar_zero(reverse): gsr = cudf.Series(data=[0.2434], dtype=np.timedelta64(1, "ns")) psr = gsr.to_pandas() scalar = datetime.timedelta(days=768) if reverse: expected = scalar % psr actual = scalar % gsr else: expected = psr % scalar actual = gsr % scalar assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), [1000000, 200000, 3000000], [1000000, 200000, None], [1], [12, 11, 232, 223432411, 2343241, 234324, 23234], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [1.321, 1132.324, 23223231.11, 233.41, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ], ) @pytest.mark.parametrize( "cpu_scalar", [ datetime.timedelta(seconds=768), datetime.timedelta(microseconds=7), np.timedelta64(4, "s"), np.timedelta64("nat", "s"), np.timedelta64(1, "s"), np.timedelta64(1, "ms"), np.timedelta64(1, "us"), np.timedelta64("nat", "ns"), np.timedelta64(1, "ns"), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize( "op", [ "add", "sub", "truediv", "mod", "floordiv", ], ) def test_timedelta_series_ops_with_cudf_scalars(data, cpu_scalar, dtype, op): gpu_scalar = cudf.Scalar(cpu_scalar) gsr = cudf.Series(data=data, dtype=dtype) psr = gsr.to_pandas() if op == "add": expected = psr + cpu_scalar actual = gsr + gpu_scalar elif op == "sub": expected = psr - cpu_scalar actual = gsr - gpu_scalar elif op == "truediv": expected = psr / cpu_scalar actual = gsr / gpu_scalar elif op == "floordiv": expected = psr // cpu_scalar actual = gsr // gpu_scalar elif op == "mod": expected = psr % cpu_scalar actual = gsr % gpu_scalar assert_eq(expected, actual) if op == "add": expected = cpu_scalar + psr actual = gpu_scalar + gsr elif op == "sub": expected = cpu_scalar - psr actual = gpu_scalar - gsr elif op == "truediv": expected = cpu_scalar / psr actual = gpu_scalar / gsr elif op == "floordiv": expected = cpu_scalar // psr actual = gpu_scalar // gsr elif op == "mod": expected = cpu_scalar % psr actual = gpu_scalar % gsr assert_eq(expected, actual) @pytest.mark.parametrize( "reverse", [ False, pytest.param( True, marks=pytest.mark.xfail( strict=True, reason=( "timedelta modulo by zero is dubiously defined in " "both pandas and cuDF " "(see https://github.com/rapidsai/cudf/issues/5938)" ), ), ), ], ) def 
test_timedelta_series_mod_with_cudf_scalar_zero(reverse): gsr = cudf.Series(data=[0.2434], dtype=np.timedelta64(1, "ns")) psr = gsr.to_pandas() scalar = datetime.timedelta(days=768) gpu_scalar = cudf.Scalar(scalar) if reverse: expected = scalar % psr actual = gpu_scalar % gsr else: expected = psr % scalar actual = gsr % gpu_scalar assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), [1000000, 200000, 3000000], [1000000, 200000, None], [1], [12, 11, 232, 223432411, 2343241, 234324, 23234], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize("reduction_op", ["sum", "mean", "median", "quantile"]) def test_timedelta_reduction_ops(data, dtype, reduction_op): gsr = cudf.Series(data, dtype=dtype) psr = gsr.to_pandas() if len(psr) > 0 and psr.isnull().all() and reduction_op == "median": with pytest.warns(RuntimeWarning, match="Mean of empty slice"): expected = getattr(psr, reduction_op)() else: expected = getattr(psr, reduction_op)() actual = getattr(gsr, reduction_op)() if pd.isna(expected) and pd.isna(actual): pass elif isinstance(expected, pd.Timedelta) and isinstance( actual, pd.Timedelta ): assert ( expected.round(gsr._column.time_unit).value == actual.round(gsr._column.time_unit).value ) else: assert_eq(expected, actual) @pytest.mark.parametrize( "data", _TIMEDELTA_DATA, ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) def test_timedelta_dt_components(data, dtype): gsr = cudf.Series(data, dtype=dtype) psr = gsr.to_pandas() expected = psr.dt.components actual = gsr.dt.components if gsr.isnull().any(): assert_eq(expected, actual.astype("float")) else: assert_eq(expected, actual) @pytest.mark.parametrize( "data", _TIMEDELTA_DATA, ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) def test_timedelta_dt_properties(data, dtype): gsr = cudf.Series(data, dtype=dtype) psr = gsr.to_pandas() def local_assert(expected, actual): if gsr.isnull().any(): assert_eq(expected, actual.astype("float")) else: assert_eq(expected, actual) expected_days = psr.dt.days actual_days = gsr.dt.days local_assert(expected_days, actual_days) expected_seconds = psr.dt.seconds actual_seconds = gsr.dt.seconds local_assert(expected_seconds, actual_seconds) expected_microseconds = psr.dt.microseconds actual_microseconds = gsr.dt.microseconds local_assert(expected_microseconds, actual_microseconds) expected_nanoseconds = psr.dt.nanoseconds actual_nanoseconds = gsr.dt.nanoseconds local_assert(expected_nanoseconds, actual_nanoseconds) @pytest.mark.parametrize( "data", _TIMEDELTA_DATA, ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) def test_timedelta_index(data, dtype): gdi = cudf.Index(data, dtype=dtype) pdi = gdi.to_pandas() assert_eq(pdi, gdi) @pytest.mark.parametrize("data", _TIMEDELTA_DATA_NON_OVERFLOW) @pytest.mark.parametrize("datetime_dtype", utils.DATETIME_TYPES) @pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES) def test_timedelta_index_datetime_index_ops( data, datetime_dtype, timedelta_dtype ): gdt = cudf.Index(data, dtype=datetime_dtype) gtd = cudf.Index(data, dtype=timedelta_dtype) pdt = gdt.to_pandas() ptd = gtd.to_pandas() assert_eq(gdt - gtd, pdt - ptd) assert_eq(gdt + 
gtd, pdt + ptd) @pytest.mark.parametrize( "datetime_data,timedelta_data", [ ([1000000, 200000, 3000000], [1000000, 200000, 3000000]), ([1000000, 200000, None], [1000000, 200000, None]), ([], []), ([None], [None]), ([None, None, None, None, None], [None, None, None, None, None]), ( [12, 12, 22, 343, 4353534, 435342], [12, 12, 22, 343, 4353534, 435342], ), (np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])), (cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])), ([1000000, 200000, 3000000], [200000, 34543, 3000000]), ([1000000, 200000, None], [1000000, 200000, 3000000]), ([None], [1]), ( [12, 12, 22, 343, 4353534, 435342], [None, 1, 220, 3, 34, 4353423287], ), (np.array([10, 20, 30, None, 100]), np.array([10, 20, 30, None, 100])), (cp.asarray([10, 20, 30, 100]), cp.asarray([10, 20, 30, 100])), ( [12, 11, 232, 223432411, 2343241, 234324, 23234], [11, 1132324, 2322323111, 23341, 2434, 332, 323], ), ( [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [11, 1132324, 2322323111, 23341, 2434, 332, 323], ), ( [11, 1132324, 2322323111, 23341, 2434, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ), ( [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ), ], ) @pytest.mark.parametrize("datetime_dtype", utils.DATETIME_TYPES) @pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES) def test_timedelta_datetime_index_ops_misc( datetime_data, timedelta_data, datetime_dtype, timedelta_dtype ): gdt = cudf.Index(datetime_data, dtype=datetime_dtype) gtd = cudf.Index(timedelta_data, dtype=timedelta_dtype) pdt = gdt.to_pandas() ptd = gtd.to_pandas() assert_eq(gdt - gtd, pdt - ptd) assert_eq(gdt + gtd, pdt + ptd) @pytest.mark.parametrize("data", _TIMEDELTA_DATA_NON_OVERFLOW) @pytest.mark.parametrize( "other_scalars", [ pd.Timedelta(1513393355.5, unit="s"), pd.Timedelta(34765, unit="D"), datetime.timedelta(days=768), datetime.timedelta(seconds=768), datetime.timedelta(microseconds=7), datetime.timedelta(minutes=447), datetime.timedelta(hours=447), datetime.timedelta(weeks=734), np.timedelta64(4, "s"), np.timedelta64(456, "D"), np.timedelta64(46, "h"), np.timedelta64("nat"), np.timedelta64(1, "s"), np.timedelta64(1, "ms"), np.timedelta64(1, "us"), np.timedelta64(1, "ns"), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize( "op", [ "add", "sub", "truediv", "floordiv", ], ) @pytest.mark.filterwarnings("ignore:divide by zero:RuntimeWarning:pandas") def test_timedelta_index_ops_with_scalars( request, data, other_scalars, dtype, op ): gtdi = cudf.Index(data=data, dtype=dtype) ptdi = gtdi.to_pandas() if op == "add": expected = ptdi + other_scalars actual = gtdi + other_scalars elif op == "sub": expected = ptdi - other_scalars actual = gtdi - other_scalars elif op == "truediv": expected = ptdi / other_scalars actual = gtdi / other_scalars elif op == "floordiv": expected = ptdi // other_scalars actual = gtdi // other_scalars assert_eq(expected, actual) if op == "add": expected = other_scalars + ptdi actual = other_scalars + gtdi elif op == "sub": expected = other_scalars - ptdi actual = other_scalars - gtdi elif op == "truediv": expected = other_scalars / ptdi actual = other_scalars / gtdi elif op == "floordiv": expected = other_scalars // ptdi actual = other_scalars // gtdi # Division by zero for datetime or timedelta is # dubiously defined in both pandas (Any // 0 -> 0 in # pandas) and cuDF (undefined behaviour) request.applymarker( pytest.mark.xfail( condition=( op == 
"floordiv" and 0 in ptdi.astype("int") and np.timedelta64(other_scalars).item() is not None ), reason="Related to https://github.com/rapidsai/cudf/issues/5938", ) ) assert_eq(expected, actual) @pytest.mark.parametrize("data", _TIMEDELTA_DATA_NON_OVERFLOW) @pytest.mark.parametrize( "cpu_scalar", [ pd.Timedelta(1513393355.5, unit="s"), datetime.timedelta(seconds=768), datetime.timedelta(microseconds=7), np.timedelta64(1, "s"), np.timedelta64(1, "ms"), np.timedelta64(1, "us"), np.timedelta64(1, "ns"), ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize( "op", [ "add", "sub", "truediv", "floordiv", ], ) def test_timedelta_index_ops_with_cudf_scalars( request, data, cpu_scalar, dtype, op ): gtdi = cudf.Index(data=data, dtype=dtype) ptdi = gtdi.to_pandas() gpu_scalar = cudf.Scalar(cpu_scalar) if op == "add": expected = ptdi + cpu_scalar actual = gtdi + gpu_scalar elif op == "sub": expected = ptdi - cpu_scalar actual = gtdi - gpu_scalar elif op == "truediv": expected = ptdi / cpu_scalar actual = gtdi / gpu_scalar elif op == "floordiv": expected = ptdi // cpu_scalar actual = gtdi // gpu_scalar assert_eq(expected, actual) if op == "add": expected = cpu_scalar + ptdi actual = gpu_scalar + gtdi elif op == "sub": expected = cpu_scalar - ptdi actual = gpu_scalar - gtdi elif op == "truediv": expected = cpu_scalar / ptdi actual = gpu_scalar / gtdi elif op == "floordiv": expected = cpu_scalar // ptdi actual = gpu_scalar // gtdi # Division by zero for datetime or timedelta is # dubiously defined in both pandas (Any // 0 -> 0 in # pandas) and cuDF (undefined behaviour) request.applymarker( pytest.mark.xfail( condition=( op == "floordiv" and 0 in ptdi.astype("int") and np.timedelta64(cpu_scalar).item() is not None ), reason="https://github.com/rapidsai/cudf/issues/5938", ) ) assert_eq(expected, actual) @pytest.mark.parametrize("data", _TIMEDELTA_DATA) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize("name", ["abcd", None]) def test_timedelta_index_properties(data, dtype, name): gdi = cudf.Index(data, dtype=dtype, name=name) pdi = gdi.to_pandas() def local_assert(expected, actual): if actual._values.null_count: assert_eq(expected, actual.astype("float64")) else: assert_eq(expected, actual) expected_days = pdi.days actual_days = gdi.days local_assert(expected_days, actual_days) expected_seconds = pdi.seconds actual_seconds = gdi.seconds local_assert(expected_seconds, actual_seconds) expected_microseconds = pdi.microseconds actual_microseconds = gdi.microseconds local_assert(expected_microseconds, actual_microseconds) expected_nanoseconds = pdi.nanoseconds actual_nanoseconds = gdi.nanoseconds local_assert(expected_nanoseconds, actual_nanoseconds) expected_components = pdi.components actual_components = gdi.components if actual_components.isnull().any().any(): assert_eq(expected_components, actual_components.astype("float")) else: assert_eq( expected_components, actual_components, check_index_type=not actual_components.empty, ) @pytest.mark.parametrize("data", _TIMEDELTA_DATA) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize( "fill_value", [ np.timedelta64(4, "s"), np.timedelta64(456, "D"), np.timedelta64(46, "h"), np.timedelta64("nat"), np.timedelta64(1, "s"), np.timedelta64(1, "ms"), np.timedelta64(1, "us"), np.timedelta64(1, "ns"), "NaT", ], ) def test_timedelta_fillna(data, dtype, fill_value): sr = cudf.Series(data, dtype=dtype) psr = sr.to_pandas() expected = psr.dropna() actual = sr.dropna() 
assert_eq(expected, actual) expected = psr.fillna(fill_value) actual = sr.fillna(fill_value) assert_eq(expected, actual) expected = expected.dropna() actual = actual.dropna() assert_eq(expected, actual) @pytest.mark.parametrize( "gsr,expected_series", [ ( cudf.Series([1, 2, 3], dtype="timedelta64[ns]"), cudf.Series( [ "0 days 00:00:00.000000001", "0 days 00:00:00.000000002", "0 days 00:00:00.000000003", ] ), ), ( cudf.Series([1000000, 200000, 3000000], dtype="timedelta64[ms]"), cudf.Series( ["0 days 00:16:40", "0 days 00:03:20", "0 days 00:50:00"] ), ), ( cudf.Series([1000000, 200000, 3000000], dtype="timedelta64[s]"), cudf.Series( ["11 days 13:46:40", "2 days 07:33:20", "34 days 17:20:00"] ), ), ( cudf.Series( [None, None, None, None, None], dtype="timedelta64[us]" ), cudf.Series([None, None, None, None, None], dtype="str"), ), ( cudf.Series( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[us]", ), cudf.Series( [ "0 days 00:02:16.457654", None, "0 days 00:04:05.345345", "0 days 00:03:43.432411", None, "0 days 01:00:34.548734", "0 days 00:00:00.023234", ] ), ), ( cudf.Series( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[ms]", ), cudf.Series( [ "1 days 13:54:17.654", None, "2 days 20:09:05.345", "2 days 14:03:52.411", None, "42 days 01:35:48.734", "0 days 00:00:23.234", ] ), ), ( cudf.Series( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[s]", ), cudf.Series( [ "1579 days 08:54:14", None, "2839 days 15:29:05", "2586 days 00:33:31", None, "42066 days 12:52:14", "0 days 06:27:14", ] ), ), ( cudf.Series( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[ns]", ), cudf.Series( [ "0 days 00:00:00.136457654", None, "0 days 00:00:00.245345345", "0 days 00:00:00.223432411", None, "0 days 00:00:03.634548734", "0 days 00:00:00.000023234", ] ), ), ], ) def test_timedelta_str_roundtrip(gsr, expected_series): actual_series = gsr.astype("str") assert_eq(expected_series, actual_series) assert_eq(gsr, actual_series.astype(gsr.dtype)) def test_timedelta_invalid_ops(): sr = cudf.Series([1, 2, 3], dtype="timedelta64[ns]") psr = sr.to_pandas() assert_exceptions_equal( lfunc=operator.add, rfunc=operator.add, lfunc_args_and_kwargs=([psr, 1],), rfunc_args_and_kwargs=([sr, 1],), ) assert_exceptions_equal( lfunc=operator.add, rfunc=operator.add, lfunc_args_and_kwargs=([psr, "a"],), rfunc_args_and_kwargs=([sr, "a"],), ) dt_sr = cudf.Series([1, 2, 3], dtype="datetime64[ns]") dt_psr = dt_sr.to_pandas() assert_exceptions_equal( lfunc=operator.mod, rfunc=operator.mod, lfunc_args_and_kwargs=([psr, dt_psr],), rfunc_args_and_kwargs=([sr, dt_sr],), ) assert_exceptions_equal( lfunc=operator.mod, rfunc=operator.mod, lfunc_args_and_kwargs=([psr, "a"],), rfunc_args_and_kwargs=([sr, "a"],), check_exception_type=False, ) assert_exceptions_equal( lfunc=operator.gt, rfunc=operator.gt, lfunc_args_and_kwargs=([psr, dt_psr],), rfunc_args_and_kwargs=([sr, dt_sr],), ) assert_exceptions_equal( lfunc=operator.lt, rfunc=operator.lt, lfunc_args_and_kwargs=([psr, dt_psr],), rfunc_args_and_kwargs=([sr, dt_sr],), ) assert_exceptions_equal( lfunc=operator.ge, rfunc=operator.ge, lfunc_args_and_kwargs=([psr, dt_psr],), rfunc_args_and_kwargs=([sr, dt_sr],), ) assert_exceptions_equal( lfunc=operator.le, rfunc=operator.le, lfunc_args_and_kwargs=([psr, dt_psr],), rfunc_args_and_kwargs=([sr, dt_sr],), ) assert_exceptions_equal( lfunc=operator.truediv, rfunc=operator.truediv, 
lfunc_args_and_kwargs=([psr, dt_psr],), rfunc_args_and_kwargs=([sr, dt_sr],), ) assert_exceptions_equal( lfunc=operator.floordiv, rfunc=operator.floordiv, lfunc_args_and_kwargs=([psr, dt_psr],), rfunc_args_and_kwargs=([sr, dt_sr],), ) assert_exceptions_equal( lfunc=operator.mul, rfunc=operator.mul, lfunc_args_and_kwargs=([psr, dt_psr],), rfunc_args_and_kwargs=([sr, dt_sr],), ) assert_exceptions_equal( lfunc=operator.mul, rfunc=operator.mul, lfunc_args_and_kwargs=([psr, psr],), rfunc_args_and_kwargs=([sr, sr],), check_exception_type=False, ) assert_exceptions_equal( lfunc=operator.xor, rfunc=operator.xor, lfunc_args_and_kwargs=([psr, psr],), rfunc_args_and_kwargs=([sr, sr],), ) def test_timedelta_datetime_cast_invalid(): sr = cudf.Series([1, 2, 3], dtype="timedelta64[ns]") psr = sr.to_pandas() assert_exceptions_equal( psr.astype, sr.astype, (["datetime64[ns]"],), (["datetime64[ns]"],), ) sr = cudf.Series([1, 2, 3], dtype="datetime64[ns]") psr = sr.to_pandas() assert_exceptions_equal( psr.astype, sr.astype, (["timedelta64[ns]"],), (["timedelta64[ns]"],), ) @pytest.mark.parametrize("data", [[], [1, 2, 3, 4, 5]]) @pytest.mark.parametrize("dtype", utils.NUMERIC_TYPES) @pytest.mark.parametrize("timedelta_dtype", utils.TIMEDELTA_TYPES) def test_numeric_to_timedelta(data, dtype, timedelta_dtype): sr = cudf.Series(data, dtype=dtype) psr = sr.to_pandas() actual = sr.astype(timedelta_dtype) expected = pd.Series(psr.to_numpy().astype(timedelta_dtype)) assert_eq(expected, actual) @pytest.mark.parametrize("data", [[], [1, 2, 3, 4, 5]]) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize( "scalar", [ 1, 2, 3, "a", np.timedelta64(1, "s"), np.timedelta64(2, "s"), np.timedelta64(2, "D"), np.timedelta64(3, "ms"), np.timedelta64(4, "us"), np.timedelta64(5, "ns"), np.timedelta64(6, "ns"), np.datetime64(6, "s"), ], ) def test_timedelta_contains(data, dtype, scalar): sr = cudf.Series(data, dtype=dtype) psr = sr.to_pandas() expected = scalar in sr actual = scalar in psr assert_eq(expected, actual) @pytest.mark.parametrize("data", [[1, 2, 3], [], [1, 20, 1000, None]]) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize("ddof", [1, 2, 3]) def test_timedelta_std(data, dtype, ddof): gsr = cudf.Series(data, dtype=dtype) psr = gsr.to_pandas() expected = psr.std(ddof=ddof) actual = gsr.std(ddof=ddof) if np.isnat(expected.to_numpy()) and np.isnat(actual.to_numpy()): assert True else: np.testing.assert_allclose( expected.to_numpy().astype("float64"), actual.to_numpy().astype("float64"), rtol=1e-5, atol=0, ) @pytest.mark.parametrize("op", ["max", "min"]) @pytest.mark.parametrize( "data", [ [], [1, 2, 3, 100], [10, None, 100, None, None], [None, None, None], [1231], ], ) @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) def test_timedelta_reductions(data, op, dtype): sr = cudf.Series(data, dtype=dtype) psr = sr.to_pandas() actual = getattr(sr, op)() expected = getattr(psr, op)() if np.isnat(expected.to_numpy()) and np.isnat(actual): assert True else: assert_eq(expected.to_numpy(), actual) def test_error_values(): s = cudf.Series([1, 2, 3], dtype="timedelta64[ns]") with pytest.raises( NotImplementedError, match="TimeDelta Arrays is not yet implemented in cudf", ): s.values @pytest.mark.parametrize("dtype", utils.TIMEDELTA_TYPES) @pytest.mark.parametrize("name", [None, "delta-index"]) def test_create_TimedeltaIndex(dtype, name): gdi = cudf.TimedeltaIndex( [1132223, 2023232, 342234324, 4234324], dtype=dtype, name=name ) pdi = gdi.to_pandas() assert_eq(pdi, gdi) 
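# Illustrative note: integer inputs are interpreted as counts of the
# dtype's unit, e.g. cudf.TimedeltaIndex([1, 2], dtype="timedelta64[s]")
# holds the durations 1s and 2s, mirroring pd.TimedeltaIndex.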
@pytest.mark.parametrize("data", [[43534, 43543, 37897, 2000]]) @pytest.mark.parametrize("dtype", ["timedelta64[ns]"]) def test_timedelta_constructor(data, dtype): expected = pd.TimedeltaIndex(data=data, dtype=dtype) actual = cudf.TimedeltaIndex(data=data, dtype=dtype) assert_eq(expected, actual) expected = pd.TimedeltaIndex(data=pd.Series(data), dtype=dtype) actual = cudf.TimedeltaIndex(data=cudf.Series(data), dtype=dtype) assert_eq(expected, actual) @pytest.mark.parametrize("op", [operator.add, operator.sub]) def test_timdelta_binop_tz_timestamp(op): s = cudf.Series([1, 2, 3], dtype="timedelta64[ns]") pd_tz_timestamp = pd.Timestamp("1970-01-01 00:00:00.000000001", tz="utc") with pytest.raises(NotImplementedError): op(s, pd_tz_timestamp) date_tz_scalar = datetime.datetime.now(datetime.timezone.utc) with pytest.raises(NotImplementedError): op(s, date_tz_scalar) def test_timedelta_getitem_na(): s = cudf.Series([1, 2, None, 3], dtype="timedelta64[ns]") assert s[2] is cudf.NaT @pytest.mark.parametrize("data1", [[123, 456, None, 321, None]]) @pytest.mark.parametrize("data2", [[123, 456, 789, None, None]]) @pytest.mark.parametrize("op", _cmpops) def test_timedelta_series_cmpops_pandas_compatibility(data1, data2, op): gsr1 = cudf.Series(data=data1, dtype="timedelta64[ns]") psr1 = gsr1.to_pandas() gsr2 = cudf.Series(data=data2, dtype="timedelta64[ns]") psr2 = gsr2.to_pandas() expect = op(psr1, psr2) with cudf.option_context("mode.pandas_compatible", True): got = op(gsr1, gsr2) assert_eq(expect, got)
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_repr.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION.

import textwrap

import cupy as cp
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings, strategies as st

import cudf
from cudf.testing import _utils as utils
from cudf.utils.dtypes import np_dtypes_to_pandas_dtypes

repr_categories = [
    "uint16",
    "int64",
    "float64",
    "str",
    "category",
    "datetime64[ns]",
]


@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [0, 5, 10])
def test_null_series(nrows, dtype):
    size = 5
    sr = cudf.Series(np.random.randint(1, 9, size)).astype(dtype)
    sr[np.random.choice([False, True], size=size)] = None
    if dtype != "category" and cudf.dtype(dtype).kind in {"u", "i"}:
        ps = pd.Series(
            sr._column.data_array_view(mode="read").copy_to_host(),
            dtype=np_dtypes_to_pandas_dtypes.get(
                cudf.dtype(dtype), cudf.dtype(dtype)
            ),
        )
        ps[sr.isnull().to_pandas()] = pd.NA
    else:
        ps = sr.to_pandas()

    pd.options.display.max_rows = int(nrows)
    psrepr = repr(ps).replace("NaN", "<NA>").replace("None", "<NA>")
    if "UInt" in psrepr:
        psrepr = psrepr.replace("UInt", "uint")
    elif "Int" in psrepr:
        psrepr = psrepr.replace("Int", "int")
    assert psrepr.split() == repr(sr).split()
    pd.reset_option("display.max_rows")


dtype_categories = [
    "float32",
    "float64",
    "datetime64[ns]",
    "str",
    "category",
]


@pytest.mark.parametrize("ncols", [1, 2, 3, 4, 5, 10])
def test_null_dataframe(ncols):
    size = 20
    gdf = cudf.DataFrame()
    for idx, dtype in enumerate(dtype_categories):
        sr = cudf.Series(np.random.randint(0, 128, size)).astype(dtype)
        sr[np.random.choice([False, True], size=size)] = None
        gdf[dtype] = sr
    pdf = gdf.to_pandas()
    pd.options.display.max_columns = int(ncols)
    pdf_repr = repr(pdf).replace("NaN", "<NA>").replace("None", "<NA>")
    assert pdf_repr.split() == repr(gdf).split()
    pd.reset_option("display.max_columns")


@pytest.mark.parametrize("dtype", repr_categories)
@pytest.mark.parametrize("nrows", [None, 0, 1, 2, 9, 10, 11, 19, 20, 21])
def test_full_series(nrows, dtype):
    size = 20
    ps = pd.Series(np.random.randint(0, 100, size)).astype(dtype)
    sr = cudf.from_pandas(ps)
    pd.options.display.max_rows = nrows
    assert repr(ps) == repr(sr)
    pd.reset_option("display.max_rows")


@pytest.mark.parametrize("nrows", [5, 10, 15])
@pytest.mark.parametrize("ncols", [5, 10, 15])
@pytest.mark.parametrize("size", [20, 21])
@pytest.mark.parametrize("dtype", repr_categories)
def test_full_dataframe_20(dtype, size, nrows, ncols):
    pdf = pd.DataFrame(
        {idx: np.random.randint(0, 100, size) for idx in range(size)}
    ).astype(dtype)
    gdf = cudf.from_pandas(pdf)

    with pd.option_context(
        "display.max_rows", int(nrows), "display.max_columns", int(ncols)
    ):
        assert repr(pdf) == repr(gdf)
        assert pdf._repr_html_() == gdf._repr_html_()
        assert pdf._repr_latex_() == gdf._repr_latex_()


@given(
    st.lists(
        st.integers(-9223372036854775808, 9223372036854775807),
        min_size=1,
        max_size=10000,
    )
)
@settings(deadline=None)
def test_integer_dataframe(x):
    gdf = cudf.DataFrame({"x": x})
    pdf = gdf.to_pandas()
    pd.options.display.max_columns = 1
    assert repr(gdf) == repr(pdf)
    assert repr(gdf.T) == repr(pdf.T)
    pd.reset_option("display.max_columns")


@given(
    st.lists(
        st.integers(-9223372036854775808, 9223372036854775807), max_size=10000
    )
)
@settings(deadline=None)
def test_integer_series(x):
    sr = cudf.Series(x, dtype=int)
    ps = pd.Series(data=x, dtype=int)
    assert repr(sr) == repr(ps)


@given(st.lists(st.floats()))
@settings(deadline=None)
def test_float_dataframe(x):
    gdf = cudf.DataFrame({"x": cudf.Series(x, dtype=float, nan_as_null=False)})
    pdf = gdf.to_pandas()
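    # nan_as_null=False keeps float NaNs as real NaN values rather than
    # nulls, so the cudf repr is expected to match pandas exactly here.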
assert repr(gdf) == repr(pdf) @given(st.lists(st.floats())) @settings(deadline=None) def test_float_series(x): sr = cudf.Series(x, dtype=float, nan_as_null=False) ps = pd.Series(data=x, dtype=float) assert repr(sr) == repr(ps) @pytest.fixture def mixed_pdf(): pdf = pd.DataFrame() pdf["Integer"] = np.array([2345, 11987, 9027, 9027]) pdf["Date"] = np.array( ["18/04/1995", "14/07/1994", "07/06/2006", "16/09/2005"] ) pdf["Float"] = np.array([9.001, 8.343, 6, 2.781]) pdf["Integer2"] = np.array([2345, 106, 2088, 789277]) pdf["Category"] = np.array(["M", "F", "F", "F"]) pdf["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"]) pdf["Boolean"] = np.array([True, False, True, False]) return pdf @pytest.fixture def mixed_gdf(mixed_pdf): return cudf.from_pandas(mixed_pdf) def test_mixed_dataframe(mixed_pdf, mixed_gdf): assert repr(mixed_gdf) == repr(mixed_pdf) def test_mixed_series(mixed_pdf, mixed_gdf): for col in mixed_gdf.columns: assert repr(mixed_gdf[col]) == repr(mixed_pdf[col]) def test_MI(): gdf = cudf.DataFrame( { "a": np.random.randint(0, 4, 10), "b": np.random.randint(0, 4, 10), "c": np.random.randint(0, 4, 10), } ) levels = [["a", "b", "c", "d"], ["w", "x", "y", "z"], ["m", "n"]] codes = cudf.DataFrame( { "a": [0, 0, 0, 0, 1, 1, 2, 2, 3, 3], "b": [0, 1, 2, 3, 0, 1, 2, 3, 0, 1], "c": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1], } ) pd.options.display.max_rows = 999 pd.options.display.max_columns = 0 gdf = gdf.set_index(cudf.MultiIndex(levels=levels, codes=codes)) pdf = gdf.to_pandas() assert repr(gdf) == repr(pdf) assert repr(gdf.index) == repr(pdf.index) assert repr(gdf.T) == repr(pdf.T) pd.reset_option("display.max_rows") pd.reset_option("display.max_columns") @pytest.mark.parametrize("nrows", [0, 1, 3, 5, 10]) @pytest.mark.parametrize("ncols", [0, 1, 2, 3]) def test_groupby_MI(nrows, ncols): gdf = cudf.DataFrame( {"a": np.arange(10), "b": np.arange(10), "c": np.arange(10)} ) pdf = gdf.to_pandas() gdg = gdf.groupby(["a", "b"], sort=True).count() pdg = pdf.groupby(["a", "b"], sort=True).count() pd.options.display.max_rows = nrows pd.options.display.max_columns = ncols assert repr(gdg) == repr(pdg) assert repr(gdg.index) == repr(pdg.index) assert repr(gdg.T) == repr(pdg.T) pd.reset_option("display.max_rows") pd.reset_option("display.max_columns") @pytest.mark.parametrize("dtype", utils.NUMERIC_TYPES) @pytest.mark.parametrize("length", [0, 1, 10, 100, 1000]) def test_generic_index(length, dtype): psr = pd.Series( range(length), index=np.random.randint(0, high=100, size=length).astype(dtype), dtype="float64" if length == 0 else None, ) gsr = cudf.Series.from_pandas(psr) assert repr(psr.index) == repr(gsr.index) @pytest.mark.parametrize( "gdf", [ cudf.DataFrame({"a": range(10000)}), cudf.DataFrame({"a": range(10000), "b": range(10000)}), cudf.DataFrame({"a": range(20), "b": range(20)}), cudf.DataFrame( { "a": range(20), "b": range(20), "c": ["abc", "def", "xyz", "def", "pqr"] * 4, } ), cudf.DataFrame(index=[1, 2, 3]), cudf.DataFrame(index=range(10000)), cudf.DataFrame(columns=["a", "b", "c", "d"]), cudf.DataFrame(columns=["a"], index=range(10000)), cudf.DataFrame(columns=["a", "col2", "...col n"], index=range(10000)), cudf.DataFrame(index=cudf.Series(range(10000)).astype("str")), cudf.DataFrame( columns=["a", "b", "c", "d"], index=cudf.Series(range(10000)).astype("str"), ), ], ) @pytest.mark.parametrize( "slice", [ slice(2500, 5000), slice(2500, 2501), slice(5000), slice(1, 10), slice(10, 20), slice(15, 2400), ], ) @pytest.mark.parametrize("max_seq_items", [1, 10, 60, 10000, None]) 
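# The option sweep below checks that cudf clips sliced-frame reprs the
# same way pandas does for every max_rows/max_seq_items combination.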
@pytest.mark.parametrize("max_rows", [1, 10, 60, 10000, None]) def test_dataframe_sliced(gdf, slice, max_seq_items, max_rows): pd.options.display.max_seq_items = max_seq_items pd.options.display.max_rows = max_rows pdf = gdf.to_pandas() sliced_gdf = gdf[slice] sliced_pdf = pdf[slice] expected_repr = repr(sliced_pdf).replace("None", "<NA>") actual_repr = repr(sliced_gdf) assert expected_repr == actual_repr pd.reset_option("display.max_rows") pd.reset_option("display.max_seq_items") @pytest.mark.parametrize( "index,expected_repr", [ ( cudf.Index([1, 2, 3, None]), "Int64Index([1, 2, 3, <NA>], dtype='int64')", ), ( cudf.Index([None, 2.2, 3.324342, None]), "Float64Index([<NA>, 2.2, 3.324342, <NA>], dtype='float64')", ), ( cudf.Index([None, None, None], name="hello"), "StringIndex([None None None], dtype='object', name='hello')", ), ( cudf.Index([None, None, None], dtype="float", name="hello"), "Float64Index([<NA>, <NA>, <NA>], dtype='float64', name='hello')", ), ( cudf.Index([None], dtype="float64", name="hello"), "Float64Index([<NA>], dtype='float64', name='hello')", ), ( cudf.Index([None], dtype="int8", name="hello"), "Int8Index([<NA>], dtype='int8', name='hello')", ), ( cudf.Index([None] * 50, dtype="object"), "StringIndex([None None None None None None None None " "None None None None None None\n None None None None None None " "None None None None None None None None\n None None None None " "None None None None None None None None None None\n None None " "None None None None None None], dtype='object')", ), ( cudf.Index([None] * 20, dtype="uint32"), "UInt32Index([<NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, " "<NA>,\n <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, <NA>, " "<NA>,\n <NA>, <NA>],\n dtype='uint32')", ), ( cudf.Index( [None, 111, 22, 33, None, 23, 34, 2343, None], dtype="int16" ), "Int16Index([<NA>, 111, 22, 33, <NA>, 23, 34, 2343, <NA>], " "dtype='int16')", ), ( cudf.Index([1, 2, 3, None], dtype="category"), "CategoricalIndex([1, 2, 3, <NA>], categories=[1, 2, 3], " "ordered=False, dtype='category')", ), ( cudf.Index([None, None], dtype="category"), "CategoricalIndex([<NA>, <NA>], categories=[], ordered=False, " "dtype='category')", ), ( cudf.Index(np.array([10, 20, 30, None], dtype="datetime64[ns]")), "DatetimeIndex([1970-01-01 00:00:00.000000010, " "1970-01-01 00:00:00.000000020," "\n 1970-01-01 00:00:00.000000030, NaT],\n " "dtype='datetime64[ns]')", ), ( cudf.Index(np.array([10, 20, 30, None], dtype="datetime64[s]")), "DatetimeIndex([1970-01-01 00:00:10, " "1970-01-01 00:00:20, 1970-01-01 00:00:30,\n" " NaT],\n dtype='datetime64[s]')", ), ( cudf.Index(np.array([10, 20, 30, None], dtype="datetime64[us]")), "DatetimeIndex([1970-01-01 00:00:00.000010, " "1970-01-01 00:00:00.000020,\n " "1970-01-01 00:00:00.000030, NaT],\n " "dtype='datetime64[us]')", ), ( cudf.Index(np.array([10, 20, 30, None], dtype="datetime64[ms]")), "DatetimeIndex([1970-01-01 00:00:00.010, " "1970-01-01 00:00:00.020,\n " "1970-01-01 00:00:00.030, NaT],\n " "dtype='datetime64[ms]')", ), ( cudf.Index(np.array([None] * 10, dtype="datetime64[ms]")), "DatetimeIndex([NaT, NaT, NaT, NaT, NaT, NaT, NaT, NaT, " "NaT, NaT], dtype='datetime64[ms]')", ), ], ) def test_generic_index_null(index, expected_repr): actual_repr = repr(index) assert expected_repr == actual_repr @pytest.mark.parametrize( "df,pandas_special_case", [ (pd.DataFrame({"a": [1, 2, 3]}, index=[10, 20, None]), False), ( pd.DataFrame( { "a": [1, None, 3], "string_col": ["hello", "world", "rapids"], }, index=[None, "a", "b"], ), True, ), 
(pd.DataFrame([], index=[None, "a", "b"]), False), (pd.DataFrame({"aa": [None, None]}, index=[None, None]), False), (pd.DataFrame({"aa": [1, 2, 3]}, index=[None, None, None]), False), ( pd.DataFrame( {"aa": [None, 2, 3]}, index=np.array([1, None, None], dtype="datetime64[ns]"), ), False, ), ( pd.DataFrame( {"aa": [None, 2, 3]}, index=np.array([100, None, None], dtype="datetime64[ns]"), ), False, ), ( pd.DataFrame( {"aa": [None, None, None]}, index=np.array([None, None, None], dtype="datetime64[ns]"), ), False, ), ( pd.DataFrame( {"aa": [1, None, 3]}, index=np.array([10, 15, None], dtype="datetime64[ns]"), ), False, ), ( pd.DataFrame( {"a": [1, 2, None], "v": [10, None, 22], "p": [100, 200, 300]} ).set_index(["a", "v"]), False, ), ( pd.DataFrame( { "a": [1, 2, None], "v": ["n", "c", "a"], "p": [None, None, None], } ).set_index(["a", "v"]), False, ), ( pd.DataFrame( { "a": np.array([1, None, None], dtype="datetime64[ns]"), "v": ["n", "c", "a"], "p": [None, None, None], } ).set_index(["a", "v"]), False, ), ], ) def test_dataframe_null_index_repr(df, pandas_special_case): pdf = df gdf = cudf.from_pandas(pdf) expected_repr = repr(pdf).replace("NaN", "<NA>").replace("None", "<NA>") actual_repr = repr(gdf) if pandas_special_case: # Pandas inconsistently print StringIndex null values # as `None` at some places and `NaN` at few other places # Whereas cudf is consistent with strings `null` values # to be printed as `None` everywhere. actual_repr = repr(gdf).replace("None", "<NA>") assert expected_repr.split() == actual_repr.split() @pytest.mark.parametrize( "sr,pandas_special_case", [ (pd.Series([1, 2, 3], index=[10, 20, None]), False), (pd.Series([1, None, 3], name="a", index=[None, "a", "b"]), True), (pd.Series(None, index=[None, "a", "b"], dtype="float"), True), (pd.Series([None, None], name="aa", index=[None, None]), False), (pd.Series([1, 2, 3], index=[None, None, None]), False), ( pd.Series( [None, 2, 3], index=np.array([1, None, None], dtype="datetime64[ns]"), ), False, ), ( pd.Series( [None, None, None], index=np.array([None, None, None], dtype="datetime64[ns]"), ), False, ), ( pd.Series( [1, None, 3], index=np.array([10, 15, None], dtype="datetime64[ns]"), ), False, ), ( pd.DataFrame( {"a": [1, 2, None], "v": [10, None, 22], "p": [100, 200, 300]} ).set_index(["a", "v"])["p"], False, ), ( pd.DataFrame( { "a": [1, 2, None], "v": ["n", "c", "a"], "p": [None, None, None], } ).set_index(["a", "v"])["p"], False, ), ( pd.DataFrame( { "a": np.array([1, None, None], dtype="datetime64[ns]"), "v": ["n", "c", "a"], "p": [None, None, None], } ).set_index(["a", "v"])["p"], False, ), ], ) def test_series_null_index_repr(sr, pandas_special_case): psr = sr gsr = cudf.from_pandas(psr) expected_repr = repr(psr).replace("NaN", "<NA>").replace("None", "<NA>") actual_repr = repr(gsr) if pandas_special_case: # Pandas inconsistently print StringIndex null values # as `None` at some places and `NaN` at few other places # Whereas cudf is consistent with strings `null` values # to be printed as `None` everywhere. 
actual_repr = repr(gsr).replace("None", "<NA>") assert expected_repr.split() == actual_repr.split() @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), [1000000, 200000, 3000000], [1000000, 200000, None], [1], [12, 11, 232, 223432411, 2343241, 234324, 23234], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], [ 136457654, 134736784, 245345345, 223432411, 2343241, 3634548734, 23234, ], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ], ) @pytest.mark.parametrize("dtype", ["timedelta64[s]", "timedelta64[us]"]) def test_timedelta_series_s_us_repr(data, dtype): sr = cudf.Series(data, dtype=dtype) psr = sr.to_pandas() expected = repr(psr).replace("timedelta64[ns]", dtype) actual = repr(sr) assert expected.split() == actual.split() @pytest.mark.parametrize( "ser, expected_repr", [ ( cudf.Series([], dtype="timedelta64[ns]"), textwrap.dedent( """ Series([], dtype: timedelta64[ns]) """ ), ), ( cudf.Series([], dtype="timedelta64[ms]"), textwrap.dedent( """ Series([], dtype: timedelta64[ms]) """ ), ), ( cudf.Series([1000000, 200000, 3000000], dtype="timedelta64[ns]"), textwrap.dedent( """ 0 0 days 00:00:00.001000000 1 0 days 00:00:00.000200000 2 0 days 00:00:00.003000000 dtype: timedelta64[ns] """ ), ), ( cudf.Series([1000000, 200000, 3000000], dtype="timedelta64[ms]"), textwrap.dedent( """ 0 0 days 00:16:40 1 0 days 00:03:20 2 0 days 00:50:00 dtype: timedelta64[ms] """ ), ), ( cudf.Series([1000000, 200000, None], dtype="timedelta64[ns]"), textwrap.dedent( """ 0 0 days 00:00:00.001000000 1 0 days 00:00:00.000200000 2 NaT dtype: timedelta64[ns] """ ), ), ( cudf.Series([1000000, 200000, None], dtype="timedelta64[ms]"), textwrap.dedent( """ 0 0 days 00:16:40 1 0 days 00:03:20 2 NaT dtype: timedelta64[ms] """ ), ), ( cudf.Series( [None, None, None, None, None], dtype="timedelta64[ns]" ), textwrap.dedent( """ 0 NaT 1 NaT 2 NaT 3 NaT 4 NaT dtype: timedelta64[ns] """ ), ), ( cudf.Series( [None, None, None, None, None], dtype="timedelta64[ms]" ), textwrap.dedent( """ 0 NaT 1 NaT 2 NaT 3 NaT 4 NaT dtype: timedelta64[ms] """ ), ), ( cudf.Series( [12, 12, 22, 343, 4353534, 435342], dtype="timedelta64[ns]" ), textwrap.dedent( """ 0 0 days 00:00:00.000000012 1 0 days 00:00:00.000000012 2 0 days 00:00:00.000000022 3 0 days 00:00:00.000000343 4 0 days 00:00:00.004353534 5 0 days 00:00:00.000435342 dtype: timedelta64[ns] """ ), ), ( cudf.Series( [12, 12, 22, 343, 4353534, 435342], dtype="timedelta64[ms]" ), textwrap.dedent( """ 0 0 days 00:00:00.012 1 0 days 00:00:00.012 2 0 days 00:00:00.022 3 0 days 00:00:00.343 4 0 days 01:12:33.534 5 0 days 00:07:15.342 dtype: timedelta64[ms] """ ), ), ( cudf.Series( [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], dtype="timedelta64[ns]", ), textwrap.dedent( """ 0 0 days 00:00:00.000000001 1 0 days 00:00:00.000001132 2 0 days 00:00:00.023223231 3 0 days 00:00:00.000000233 4 0 days 00:00:00 5 0 days 00:00:00.000000332 6 0 days 00:00:00.000000323 dtype: timedelta64[ns] """ ), ), ( cudf.Series( [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], dtype="timedelta64[ms]", ), textwrap.dedent( """ 0 0 days 00:00:00.001 1 0 days 00:00:01.132 2 0 days 06:27:03.231 3 0 days 00:00:00.233 4 0 days 00:00:00 5 0 days 00:00:00.332 6 0 days 00:00:00.323 dtype: timedelta64[ms] """ ), ), ( cudf.Series( [ 13645765432432, 134736784, 
245345345, 223432411, 999992343241, 3634548734, 23234, ], dtype="timedelta64[ms]", ), textwrap.dedent( """ 0 157937 days 02:23:52.432 1 1 days 13:25:36.784 2 2 days 20:09:05.345 3 2 days 14:03:52.411 4 11573 days 23:39:03.241 5 42 days 01:35:48.734 6 0 days 00:00:23.234 dtype: timedelta64[ms] """ ), ), ( cudf.Series( [ 13645765432432, 134736784, 245345345, 223432411, 999992343241, 3634548734, 23234, ], dtype="timedelta64[ns]", ), textwrap.dedent( """ 0 0 days 03:47:25.765432432 1 0 days 00:00:00.134736784 2 0 days 00:00:00.245345345 3 0 days 00:00:00.223432411 4 0 days 00:16:39.992343241 5 0 days 00:00:03.634548734 6 0 days 00:00:00.000023234 dtype: timedelta64[ns] """ ), ), ( cudf.Series( [ 13645765432432, 134736784, 245345345, 223432411, 999992343241, 3634548734, 23234, ], dtype="timedelta64[ms]", name="abc", ), textwrap.dedent( """ 0 157937 days 02:23:52.432 1 1 days 13:25:36.784 2 2 days 20:09:05.345 3 2 days 14:03:52.411 4 11573 days 23:39:03.241 5 42 days 01:35:48.734 6 0 days 00:00:23.234 Name: abc, dtype: timedelta64[ms] """ ), ), ( cudf.Series( [ 13645765432432, 134736784, 245345345, 223432411, 999992343241, 3634548734, 23234, ], dtype="timedelta64[ns]", index=["a", "b", "z", "x", "y", "l", "m"], name="hello", ), textwrap.dedent( """ a 0 days 03:47:25.765432432 b 0 days 00:00:00.134736784 z 0 days 00:00:00.245345345 x 0 days 00:00:00.223432411 y 0 days 00:16:39.992343241 l 0 days 00:00:03.634548734 m 0 days 00:00:00.000023234 Name: hello, dtype: timedelta64[ns] """ ), ), ], ) def test_timedelta_series_ns_ms_repr(ser, expected_repr): expected = expected_repr actual = repr(ser) assert expected.split() == actual.split() @pytest.mark.parametrize( "df,expected_repr", [ ( cudf.DataFrame( { "a": cudf.Series( [1000000, 200000, 3000000], dtype="timedelta64[s]" ) } ), textwrap.dedent( """ a 0 11 days 13:46:40 1 2 days 07:33:20 2 34 days 17:20:00 """ ), ), ( cudf.DataFrame( { "a": cudf.Series( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[s]", ), "b": [10, 11, 22, 33, 44, 55, 66], } ), textwrap.dedent( """ a b 0 1579 days 08:54:14 10 1 NaT 11 2 2839 days 15:29:05 22 3 2586 days 00:33:31 33 4 NaT 44 5 42066 days 12:52:14 55 6 0 days 06:27:14 66 """ ), ), ( cudf.DataFrame( { "a": cudf.Series( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[s]", index=["a", "b", "c", "d", "e", "f", "g"], ) } ), textwrap.dedent( """ a a 1579 days 08:54:14 b NaT c 2839 days 15:29:05 d 2586 days 00:33:31 e NaT f 42066 days 12:52:14 g 0 days 06:27:14 """ ), ), ( cudf.DataFrame( { "a": cudf.Series( [1, 2, 3, 4, 5, 6, 7], index=cudf.Index( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[ms]", ), ) } ), textwrap.dedent( """ a 1 days 13:54:17.654 1 NaT 2 2 days 20:09:05.345 3 2 days 14:03:52.411 4 NaT 5 42 days 01:35:48.734 6 0 days 00:00:23.234 7 """ ), ), ( cudf.DataFrame( { "a": cudf.Series( ["a", "f", "q", "e", "w", "e", "t"], index=cudf.Index( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[ns]", ), ) } ), textwrap.dedent( """ a 0 days 00:00:00.136457654 a NaT f 0 days 00:00:00.245345345 q 0 days 00:00:00.223432411 e NaT w 0 days 00:00:03.634548734 e 0 days 00:00:00.000023234 t """ ), ), ], ) def test_timedelta_dataframe_repr(df, expected_repr): actual_repr = repr(df) assert actual_repr.split() == expected_repr.split() @pytest.mark.parametrize( "index, expected_repr", [ ( cudf.Index([1000000, 200000, 3000000], dtype="timedelta64[ms]"), "TimedeltaIndex(['0 
days 00:16:40', " "'0 days 00:03:20', '0 days 00:50:00'], " "dtype='timedelta64[ms]')", ), ( cudf.Index( [None, None, None, None, None], dtype="timedelta64[us]" ), "TimedeltaIndex([NaT, NaT, NaT, NaT, NaT], " "dtype='timedelta64[us]')", ), ( cudf.Index( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[us]", ), "TimedeltaIndex([0 days 00:02:16.457654, NaT, " "0 days 00:04:05.345345, " "0 days 00:03:43.432411, NaT," " 0 days 01:00:34.548734, 0 days 00:00:00.023234]," " dtype='timedelta64[us]')", ), ( cudf.Index( [ 136457654, None, 245345345, 223432411, None, 3634548734, 23234, ], dtype="timedelta64[s]", ), "TimedeltaIndex([1579 days 08:54:14, NaT, 2839 days 15:29:05," " 2586 days 00:33:31, NaT, 42066 days 12:52:14, " "0 days 06:27:14]," " dtype='timedelta64[s]')", ), ], ) def test_timedelta_index_repr(index, expected_repr): actual_repr = repr(index) assert actual_repr.split() == expected_repr.split() @pytest.mark.parametrize( "pmi", [ pd.MultiIndex.from_tuples( [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")] ), pd.MultiIndex.from_tuples( [(1, "red"), (1, "blue"), (2, "red"), (2, "blue")] * 10 ), pd.MultiIndex.from_tuples([(1, "red", 102, "sdf")]), pd.MultiIndex.from_tuples( [ ("abc", 0.234, 1), ("a", -0.34, 0), ("ai", 111, 4385798), ("rapids", 0, 34534534), ], names=["alphabets", "floats", "ints"], ), ], ) @pytest.mark.parametrize("max_seq_items", [None, 1, 2, 5, 10, 100]) def test_multiindex_repr(pmi, max_seq_items): pd.set_option("display.max_seq_items", max_seq_items) gmi = cudf.from_pandas(pmi) assert repr(gmi) == repr(pmi) pd.reset_option("display.max_seq_items") @pytest.mark.parametrize( "gdi, expected_repr", [ ( cudf.DataFrame( { "a": [None, 1, 2, 3], "b": ["abc", None, "xyz", None], "c": [0.345, np.nan, 100, 10], } ) .set_index(["a", "b"]) .index, textwrap.dedent( """ MultiIndex([(<NA>, 'abc'), ( 1, <NA>), ( 2, 'xyz'), ( 3, <NA>)], names=['a', 'b']) """ ), ), ( cudf.DataFrame( { "a": cudf.Series([None, np.nan, 2, 3], nan_as_null=False), "b": ["abc", None, "xyz", None], "c": [0.345, np.nan, 100, 10], } ) .set_index(["a", "b"]) .index, textwrap.dedent( """ MultiIndex([(<NA>, 'abc'), ( nan, <NA>), ( 2.0, 'xyz'), ( 3.0, <NA>)], names=['a', 'b']) """ ), ), ( cudf.DataFrame( { "a": cudf.Series([None, 1, 2, 3], dtype="datetime64[ns]"), "b": ["abc", None, "xyz", None], "c": [0.345, np.nan, 100, 10], } ) .set_index(["a", "b"]) .index, textwrap.dedent( """ MultiIndex([( 'NaT', 'abc'), ('1970-01-01 00:00:00.000000001', <NA>), ('1970-01-01 00:00:00.000000002', 'xyz'), ('1970-01-01 00:00:00.000000003', <NA>)], names=['a', 'b']) """ ), ), ( cudf.DataFrame( { "a": cudf.Series([None, 1, 2, 3], dtype="datetime64[ns]"), "b": ["abc", None, "xyz", None], "c": [0.345, np.nan, 100, 10], } ) .set_index(["a", "b", "c"]) .index, textwrap.dedent( """ MultiIndex([( 'NaT', 'abc', 0.345), ('1970-01-01 00:00:00.000000001', <NA>, <NA>), ('1970-01-01 00:00:00.000000002', 'xyz', 100.0), ('1970-01-01 00:00:00.000000003', <NA>, 10.0)], names=['a', 'b', 'c']) """ ), ), ( cudf.DataFrame( { "a": ["abc", None, "xyz", None], "b": cudf.Series([None, 1, 2, 3], dtype="timedelta64[ns]"), "c": [0.345, np.nan, 100, 10], } ) .set_index(["a", "b", "c"]) .index, textwrap.dedent( """ MultiIndex([('abc', 'NaT', 0.345), ( <NA>, '0 days 00:00:00.000000001', <NA>), ('xyz', '0 days 00:00:00.000000002', 100.0), ( <NA>, '0 days 00:00:00.000000003', 10.0)], names=['a', 'b', 'c']) """ ), ), ( cudf.DataFrame( { "a": ["abc", None, "xyz", None], "b": cudf.Series([None, 1, 2, 3], 
dtype="timedelta64[ns]"), "c": [0.345, np.nan, 100, 10], } ) .set_index(["c", "a"]) .index, textwrap.dedent( """ MultiIndex([(0.345, 'abc'), ( <NA>, <NA>), (100.0, 'xyz'), ( 10.0, <NA>)], names=['c', 'a']) """ ), ), ( cudf.DataFrame( { "a": [None, None, None, None], "b": cudf.Series( [None, None, None, None], dtype="timedelta64[ns]" ), "c": [0.345, np.nan, 100, 10], } ) .set_index(["b", "a"]) .index, textwrap.dedent( """ MultiIndex([('NaT', <NA>), ('NaT', <NA>), ('NaT', <NA>), ('NaT', <NA>)], names=['b', 'a']) """ ), ), ( cudf.DataFrame( { "a": [1, 2, None, 3, 5], "b": [ "abc", "def, hi, bye", None, ", one, two, three, four", None, ], "c": cudf.Series( [0.3232, np.nan, 1, None, -0.34534], nan_as_null=False ), "d": [None, 100, 2000324, None, None], } ) .set_index(["a", "b", "c", "d"]) .index, textwrap.dedent( """ MultiIndex([( 1, 'abc', 0.3232, <NA>), ( 2, 'def, hi, bye', nan, 100), (<NA>, <NA>, 1.0, 2000324), ( 3, ', one, two, three, four', <NA>, <NA>), ( 5, <NA>, -0.34534, <NA>)], names=['a', 'b', 'c', 'd']) """ ), ), ( cudf.DataFrame( { "a": [1, 2, None, 3, 5], "b": [ "abc", "def, hi, bye", None, ", one, two, three, four", None, ], "c": cudf.Series( [0.3232, np.nan, 1, None, -0.34534], nan_as_null=False ), "d": [None, 100, 2000324, None, None], } ) .set_index(["b", "a", "c", "d"]) .index, textwrap.dedent( """ MultiIndex([( 'abc', 1, 0.3232, <NA>), ( 'def, hi, bye', 2, nan, 100), ( <NA>, <NA>, 1.0, 2000324), (', one, two, three, four', 3, <NA>, <NA>), ( <NA>, 5, -0.34534, <NA>)], names=['b', 'a', 'c', 'd']) """ ), ), ( cudf.DataFrame( { "a": ["(abc", "2", None, "3", "5"], "b": [ "abc", "def, hi, bye", None, ", one, two, three, four", None, ], "c": cudf.Series( [0.3232, np.nan, 1, None, -0.34534], nan_as_null=False ), "d": [None, 100, 2000324, None, None], } ) .set_index(["a", "b", "c", "d"]) .index, textwrap.dedent( """ MultiIndex([('(abc', 'abc', 0.3232, <NA>), ( '2', 'def, hi, bye', nan, 100), ( <NA>, <NA>, 1.0, 2000324), ( '3', ', one, two, three, four', <NA>, <NA>), ( '5', <NA>, -0.34534, <NA>)], names=['a', 'b', 'c', 'd']) """ ), ), ], ) def test_multiindex_null_repr(gdi, expected_repr): actual_repr = repr(gdi) assert actual_repr.split() == expected_repr.split() def test_categorical_series_with_nan_repr(): series = cudf.Series( [1, 2, np.nan, 10, np.nan, None], nan_as_null=False ).astype("category") expected_repr = textwrap.dedent( """ 0 1.0 1 2.0 2 NaN 3 10.0 4 NaN 5 <NA> dtype: category Categories (4, float64): [1.0, 2.0, 10.0, NaN] """ ) assert repr(series).split() == expected_repr.split() sliced_expected_repr = textwrap.dedent( """ 2 NaN 3 10.0 4 NaN 5 <NA> dtype: category Categories (4, float64): [1.0, 2.0, 10.0, NaN] """ ) assert repr(series[2:]).split() == sliced_expected_repr.split() def test_categorical_dataframe_with_nan_repr(): series = cudf.Series( [1, 2, np.nan, 10, np.nan, None], nan_as_null=False ).astype("category") df = cudf.DataFrame({"a": series}) expected_repr = textwrap.dedent( """ a 0 1.0 1 2.0 2 NaN 3 10.0 4 NaN 5 <NA> """ ) assert repr(df).split() == expected_repr.split() def test_categorical_index_with_nan_repr(): cat_index = cudf.Index( cudf.Series( [1, 2, np.nan, 10, np.nan, None], nan_as_null=False ).astype("category") ) expected_repr = ( "CategoricalIndex([1.0, 2.0, NaN, 10.0, NaN, <NA>], " "categories=[1.0, 2.0, 10.0, NaN], ordered=False, dtype='category')" ) assert repr(cat_index) == expected_repr sliced_expected_repr = ( "CategoricalIndex([NaN, 10.0, NaN, <NA>], " "categories=[1.0, 2.0, 10.0, NaN], ordered=False, dtype='category')" ) assert 
repr(cat_index[2:]) == sliced_expected_repr


def test_empty_series_name():
    ps = pd.Series([], name="abc", dtype="int")
    gs = cudf.from_pandas(ps)

    assert repr(ps) == repr(gs)


def test_repr_struct_after_concat():
    df = cudf.DataFrame(
        {
            "a": cudf.Series(
                [
                    {"sa": 2056831253},
                    {"sa": -1463792165},
                    {"sa": 1735783038},
                    {"sa": 103774433},
                    {"sa": -1413247520},
                ]
                * 13
            ),
            "b": cudf.Series(
                [
                    {"sa": {"ssa": 1140062029}},
                    None,
                    {"sa": {"ssa": 1998862860}},
                    {"sa": None},
                    {"sa": {"ssa": -395088502}},
                ]
                * 13
            ),
        }
    )
    pdf = df.to_pandas()

    assert repr(df) == repr(pdf)


def test_interval_index_repr():
    pi = pd.Index(
        [
            np.nan,
            pd.Interval(2.0, 3.0, closed="right"),
            pd.Interval(3.0, 4.0, closed="right"),
        ]
    )
    gi = cudf.from_pandas(pi)

    assert repr(pi) == repr(gi)
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_binops.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

import decimal
import operator
import random
import warnings
from itertools import combinations_with_replacement, product

import cupy as cp
import numpy as np
import pandas as pd
import pytest

import cudf
from cudf import Series
from cudf.core._compat import PANDAS_GE_150
from cudf.core.buffer.spill_manager import get_global_manager
from cudf.core.index import as_index
from cudf.testing import _utils as utils
from cudf.utils.dtypes import (
    BOOL_TYPES,
    DATETIME_TYPES,
    FLOAT_TYPES,
    INTEGER_TYPES,
    NUMERIC_TYPES,
    TIMEDELTA_TYPES,
)

STRING_TYPES = {"str"}

_binops = [
    operator.add,
    operator.sub,
    operator.mul,
    operator.floordiv,
    operator.truediv,
    operator.mod,
    operator.pow,
]

_binops_compare = [
    operator.eq,
    operator.ne,
    operator.lt,
    operator.le,
    operator.gt,
    operator.ge,
]

_bitwise_binops = [operator.and_, operator.or_, operator.xor]

_int_types = [
    "int8",
    "int16",
    "int32",
    "int64",
    "uint8",
    "uint16",
    "uint32",
]

_cmpops = [
    operator.lt,
    operator.gt,
    operator.le,
    operator.ge,
    operator.eq,
    operator.ne,
]

_reflected_ops = [
    lambda x: 1 + x,
    lambda x: 2 * x,
    lambda x: 2 - x,
    lambda x: 2 // x,
    lambda x: 2 / x,
    lambda x: 3 + x,
    lambda x: 3 * x,
    lambda x: 3 - x,
    lambda x: 3 // x,
    lambda x: 3 / x,
    lambda x: 3 % x,
    lambda x: -1 + x,
    lambda x: -2 * x,
    lambda x: -2 - x,
    lambda x: -2 // x,
    lambda x: -2 / x,
    lambda x: -3 + x,
    lambda x: -3 * x,
    lambda x: -3 - x,
    lambda x: -3 // x,
    lambda x: -3 / x,
    lambda x: -3 % x,
    lambda x: 0 + x,
    lambda x: 0 * x,
    lambda x: 0 - x,
    lambda x: 0 // x,
    lambda x: 0 / x,
]

_operators_arithmetic = [
    "add",
    "radd",
    "sub",
    "rsub",
    "mul",
    "rmul",
    "mod",
    "rmod",
    "pow",
    "rpow",
    "div",
    "divide",
    "floordiv",
    "rfloordiv",
    "truediv",
    "rtruediv",
]

_operators_comparison = ["eq", "ne", "lt", "le", "gt", "ge"]

_cudf_scalar_reflected_ops = [
    lambda x: cudf.Scalar(1) + x,
    lambda x: cudf.Scalar(2) * x,
    lambda x: cudf.Scalar(2) - x,
    lambda x: cudf.Scalar(2) // x,
    lambda x: cudf.Scalar(2) / x,
    lambda x: cudf.Scalar(3) + x,
    lambda x: cudf.Scalar(3) * x,
    lambda x: cudf.Scalar(3) - x,
    lambda x: cudf.Scalar(3) // x,
    lambda x: cudf.Scalar(3) / x,
    lambda x: cudf.Scalar(3) % x,
    lambda x: cudf.Scalar(-1) + x,
    lambda x: cudf.Scalar(-2) * x,
    lambda x: cudf.Scalar(-2) - x,
    lambda x: cudf.Scalar(-2) // x,
    lambda x: cudf.Scalar(-2) / x,
    lambda x: cudf.Scalar(-3) + x,
    lambda x: cudf.Scalar(-3) * x,
    lambda x: cudf.Scalar(-3) - x,
    lambda x: cudf.Scalar(-3) // x,
    lambda x: cudf.Scalar(-3) / x,
    lambda x: cudf.Scalar(-3) % x,
    lambda x: cudf.Scalar(0) + x,
    lambda x: cudf.Scalar(0) * x,
    lambda x: cudf.Scalar(0) - x,
    lambda x: cudf.Scalar(0) // x,
    lambda x: cudf.Scalar(0) / x,
]

pytest_xfail = pytest.mark.xfail
pytestmark = pytest.mark.spilling

# If spilling is enabled globally, we skip many test permutations
# to reduce running time.
if get_global_manager() is not None:
    _binops = _binops[:1]
    _binops_compare = _binops_compare[:1]
    _int_types = _int_types[-1:]
    _cmpops = _cmpops[:1]
    _reflected_ops = _reflected_ops[:1]
    _operators_arithmetic = _operators_arithmetic[:1]
    _operators_comparison = _operators_comparison[:1]
    _cudf_scalar_reflected_ops = _cudf_scalar_reflected_ops[:1]
    DATETIME_TYPES = {"datetime64[ms]"}  # noqa: F811
    NUMERIC_TYPES = {"float32"}  # noqa: F811
    FLOAT_TYPES = {"float64"}  # noqa: F811
    INTEGER_TYPES = {"int16"}  # noqa: F811
    TIMEDELTA_TYPES = {"timedelta64[s]"}  # noqa: F811
    # To save time, we skip tests marked "pytest.mark.xfail"
    pytest_xfail = pytest.mark.skipif


@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _binops)
def test_series_binop(binop, obj_class):
    nelem = 1000
    arr1 = utils.gen_rand("float64", nelem) * 10000
    # Keeping a low value because CUDA 'pow' has 2 full range error
    arr2 = utils.gen_rand("float64", nelem) * 10

    sr1 = Series(arr1)
    sr2 = Series(arr2)

    if obj_class == "Index":
        sr1 = as_index(sr1)
        sr2 = as_index(sr2)

    result = binop(sr1, sr2)
    expect = binop(pd.Series(arr1), pd.Series(arr2))

    if obj_class == "Index":
        result = Series(result)

    utils.assert_eq(result, expect)


@pytest.mark.parametrize("binop", _binops)
def test_series_binop_concurrent(binop):
    def func(index):
        arr = np.random.random(100) * 10
        sr = Series(arr)

        result = binop(sr.astype("int32"), sr)
        expect = binop(arr.astype("int32"), arr)

        np.testing.assert_almost_equal(result.to_numpy(), expect, decimal=5)

    from concurrent.futures import ThreadPoolExecutor

    indices = range(10)
    with ThreadPoolExecutor(4) as e:  # four threads
        list(e.map(func, indices))


@pytest.mark.parametrize("use_cudf_scalar", [False, True])
@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("nelem,binop", list(product([1, 2, 100], _binops)))
def test_series_binop_scalar(nelem, binop, obj_class, use_cudf_scalar):
    arr = np.random.random(nelem)
    rhs = random.choice(arr).item()

    sr = Series(arr)
    if obj_class == "Index":
        sr = as_index(sr)

    if use_cudf_scalar:
        result = binop(sr, cudf.Scalar(rhs))
    else:
        result = binop(sr, rhs)

    if obj_class == "Index":
        result = Series(result)

    np.testing.assert_almost_equal(result.to_numpy(), binop(arr, rhs))


@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("binop", _bitwise_binops)
@pytest.mark.parametrize(
    "lhs_dtype,rhs_dtype", list(product(_int_types, _int_types))
)
def test_series_bitwise_binop(binop, obj_class, lhs_dtype, rhs_dtype):
    arr1 = (np.random.random(100) * 100).astype(lhs_dtype)
    sr1 = Series(arr1)

    arr2 = (np.random.random(100) * 100).astype(rhs_dtype)
    sr2 = Series(arr2)

    if obj_class == "Index":
        sr1 = as_index(sr1)
        sr2 = as_index(sr2)

    result = binop(sr1, sr2)

    if obj_class == "Index":
        result = Series(result)

    np.testing.assert_almost_equal(result.to_numpy(), binop(arr1, arr2))


@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize(
    "dtype", ["int8", "int32", "int64", "float32", "float64", "datetime64[ms]"]
)
def test_series_compare(cmpop, obj_class, dtype):
    arr1 = np.random.randint(0, 100, 100).astype(dtype)
    arr2 = np.random.randint(0, 100, 100).astype(dtype)
    sr1 = Series(arr1)
    sr2 = Series(arr2)

    if obj_class == "Index":
        sr1 = as_index(sr1)
        sr2 = as_index(sr2)

    result1 = cmpop(sr1, sr1)
    result2 = cmpop(sr2, sr2)
    result3 = cmpop(sr1, sr2)

    if obj_class == "Index":
        result1 = Series(result1)
        result2 = Series(result2)
        result3 = Series(result3)
    np.testing.assert_equal(result1.to_numpy(), cmpop(arr1, arr1))
    np.testing.assert_equal(result2.to_numpy(), cmpop(arr2, arr2))
    np.testing.assert_equal(result3.to_numpy(), cmpop(arr1, arr2))


def _series_compare_nulls_typegen():
    return [
        *combinations_with_replacement(DATETIME_TYPES, 2),
        *combinations_with_replacement(TIMEDELTA_TYPES, 2),
        *combinations_with_replacement(NUMERIC_TYPES, 2),
        *combinations_with_replacement(STRING_TYPES, 2),
    ]


@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize("dtypes", _series_compare_nulls_typegen())
def test_series_compare_nulls(cmpop, dtypes):
    ltype, rtype = dtypes

    ldata = [1, 2, None, None, 5]
    rdata = [2, 1, None, 4, None]

    lser = Series(ldata, dtype=ltype)
    rser = Series(rdata, dtype=rtype)

    lmask = ~lser.isnull()
    rmask = ~rser.isnull()

    expect_mask = np.logical_and(lmask, rmask)
    expect = cudf.Series([None] * 5, dtype="bool")
    expect[expect_mask] = cmpop(lser[expect_mask], rser[expect_mask])

    got = cmpop(lser, rser)
    utils.assert_eq(expect, got)


@pytest.fixture
def str_series_cmp_data():
    return pd.Series(["a", "b", None, "d", "e", None], dtype="string")


@pytest.fixture(ids=[op.__name__ for op in _cmpops], params=_cmpops)
def str_series_compare_str_cmpop(request):
    return request.param


@pytest.fixture(ids=["eq", "ne"], params=[operator.eq, operator.ne])
def str_series_compare_num_cmpop(request):
    return request.param


@pytest.fixture(ids=["int", "float", "bool"], params=[1, 1.5, True])
def cmp_scalar(request):
    return request.param


def test_str_series_compare_str(
    str_series_cmp_data, str_series_compare_str_cmpop
):
    expect = str_series_compare_str_cmpop(str_series_cmp_data, "a")
    got = str_series_compare_str_cmpop(
        Series.from_pandas(str_series_cmp_data), "a"
    )

    utils.assert_eq(expect, got.to_pandas(nullable=True))


def test_str_series_compare_str_reflected(
    str_series_cmp_data, str_series_compare_str_cmpop
):
    expect = str_series_compare_str_cmpop("a", str_series_cmp_data)
    got = str_series_compare_str_cmpop(
        "a", Series.from_pandas(str_series_cmp_data)
    )

    utils.assert_eq(expect, got.to_pandas(nullable=True))


def test_str_series_compare_num(
    str_series_cmp_data, str_series_compare_num_cmpop, cmp_scalar
):
    expect = str_series_compare_num_cmpop(str_series_cmp_data, cmp_scalar)
    got = str_series_compare_num_cmpop(
        Series.from_pandas(str_series_cmp_data), cmp_scalar
    )

    utils.assert_eq(expect, got.to_pandas(nullable=True))


def test_str_series_compare_num_reflected(
    str_series_cmp_data, str_series_compare_num_cmpop, cmp_scalar
):
    expect = str_series_compare_num_cmpop(cmp_scalar, str_series_cmp_data)
    got = str_series_compare_num_cmpop(
        cmp_scalar, Series.from_pandas(str_series_cmp_data)
    )

    utils.assert_eq(expect, got.to_pandas(nullable=True))


@pytest.mark.parametrize("obj_class", ["Series", "Index"])
@pytest.mark.parametrize("nelem", [1, 2, 100])
@pytest.mark.parametrize("cmpop", _cmpops)
@pytest.mark.parametrize("dtype", utils.NUMERIC_TYPES + ["datetime64[ms]"])
@pytest.mark.parametrize("use_cudf_scalar", [True, False])
def test_series_compare_scalar(
    nelem, cmpop, obj_class, dtype, use_cudf_scalar
):
    arr1 = np.random.randint(0, 100, 100).astype(dtype)
    sr1 = Series(arr1)
    rhs = random.choice(arr1).item()

    if use_cudf_scalar:
        rhs = cudf.Scalar(rhs)

    if obj_class == "Index":
        sr1 = as_index(sr1)

    result1 = cmpop(sr1, rhs)
    result2 = cmpop(rhs, sr1)

    if obj_class == "Index":
        result1 = Series(result1)
        result2 = Series(result2)

    np.testing.assert_equal(result1.to_numpy(), cmpop(arr1, rhs))
    np.testing.assert_equal(result2.to_numpy(), cmpop(rhs, arr1))


_nulls = ["none", "some"]
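
# Illustrative sketch (added for exposition; not part of the original
# suite): the null-handling tests above and the validity tests below rely
# on binary ops combining validity masks with a logical AND, so a result
# row is null iff it is null in either operand. `_null_mask_demo` is a
# hypothetical helper name used only for this example.
def _null_mask_demo():
    import cudf

    lhs = cudf.Series([1, None, 3])
    rhs = cudf.Series([None, 2, 3])
    out = lhs + rhs
    # Only the last row has both inputs valid, so only it is non-null.
    assert out.isnull().to_pandas().tolist() == [True, True, False]
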
@pytest.mark.parametrize("nelem", [1, 7, 8, 9, 32, 64, 128]) @pytest.mark.parametrize("lhs_nulls,rhs_nulls", list(product(_nulls, _nulls))) def test_validity_add(nelem, lhs_nulls, rhs_nulls): np.random.seed(0) # LHS lhs_data = np.random.random(nelem) if lhs_nulls == "some": lhs_mask = utils.random_bitmask(nelem) lhs_bitmask = utils.expand_bits_to_bytes(lhs_mask)[:nelem] lhs_null_count = utils.count_zero(lhs_bitmask) assert lhs_null_count >= 0 lhs = Series.from_masked_array(lhs_data, lhs_mask) assert lhs.null_count == lhs_null_count else: lhs = Series(lhs_data) # RHS rhs_data = np.random.random(nelem) if rhs_nulls == "some": rhs_mask = utils.random_bitmask(nelem) rhs_bitmask = utils.expand_bits_to_bytes(rhs_mask)[:nelem] rhs_null_count = utils.count_zero(rhs_bitmask) assert rhs_null_count >= 0 rhs = Series.from_masked_array(rhs_data, rhs_mask) assert rhs.null_count == rhs_null_count else: rhs = Series(rhs_data) # Result res = lhs + rhs if lhs_nulls == "some" and rhs_nulls == "some": res_mask = np.asarray( utils.expand_bits_to_bytes(lhs_mask & rhs_mask), dtype=np.bool_ )[:nelem] if lhs_nulls == "some" and rhs_nulls == "none": res_mask = np.asarray( utils.expand_bits_to_bytes(lhs_mask), dtype=np.bool_ )[:nelem] if lhs_nulls == "none" and rhs_nulls == "some": res_mask = np.asarray( utils.expand_bits_to_bytes(rhs_mask), dtype=np.bool_ )[:nelem] # Fill NA values na_value = -10000 got = res.fillna(na_value).to_numpy() expect = lhs_data + rhs_data if lhs_nulls == "some" or rhs_nulls == "some": expect[~res_mask] = na_value np.testing.assert_array_equal(expect, got) @pytest.mark.parametrize("obj_class", ["Series", "Index"]) @pytest.mark.parametrize( "binop,lhs_dtype,rhs_dtype", list( product( [operator.add, operator.mul], utils.NUMERIC_TYPES, utils.NUMERIC_TYPES, ) ), ) def test_series_binop_mixed_dtype(binop, lhs_dtype, rhs_dtype, obj_class): nelem = 10 lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype) rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype) sr1 = Series(lhs) sr2 = Series(rhs) if obj_class == "Index": sr1 = as_index(sr1) sr2 = as_index(sr2) result = binop(Series(sr1), Series(sr2)) if obj_class == "Index": result = Series(result) np.testing.assert_almost_equal(result.to_numpy(), binop(lhs, rhs)) @pytest.mark.parametrize("obj_class", ["Series", "Index"]) @pytest.mark.parametrize( "cmpop,lhs_dtype,rhs_dtype", list(product(_cmpops, utils.NUMERIC_TYPES, utils.NUMERIC_TYPES)), ) def test_series_cmpop_mixed_dtype(cmpop, lhs_dtype, rhs_dtype, obj_class): nelem = 5 lhs = (np.random.random(nelem) * nelem).astype(lhs_dtype) rhs = (np.random.random(nelem) * nelem).astype(rhs_dtype) sr1 = Series(lhs) sr2 = Series(rhs) if obj_class == "Index": sr1 = as_index(sr1) sr2 = as_index(sr2) result = cmpop(Series(sr1), Series(sr2)) if obj_class == "Index": result = Series(result) np.testing.assert_array_equal(result.to_numpy(), cmpop(lhs, rhs)) @pytest.mark.parametrize("obj_class", ["Series", "Index"]) @pytest.mark.parametrize( "func, dtype", list(product(_reflected_ops, utils.NUMERIC_TYPES)) ) def test_series_reflected_ops_scalar(func, dtype, obj_class): # create random series np.random.seed(12) random_series = utils.gen_rand(dtype, 100, low=10) # gpu series gs = Series(random_series) # class typing if obj_class == "Index": gs = as_index(gs) gs_result = func(gs) # class typing if obj_class == "Index": gs = Series(gs) # pandas ps_result = func(random_series) # verify np.testing.assert_allclose(ps_result, gs_result.to_numpy()) @pytest.mark.parametrize( "func, dtype", list(product(_reflected_ops, 
utils.NUMERIC_TYPES)) ) def test_cudf_scalar_reflected_ops_scalar(func, dtype): value = 42 scalar = cudf.Scalar(42) expected = func(value) actual = func(scalar).value assert np.isclose(expected, actual) @pytest.mark.parametrize("obj_class", ["Series", "Index"]) @pytest.mark.parametrize( "funcs, dtype", list( product( list(zip(_reflected_ops, _cudf_scalar_reflected_ops)), utils.NUMERIC_TYPES, ) ), ) def test_series_reflected_ops_cudf_scalar(funcs, dtype, obj_class): cpu_func, gpu_func = funcs # create random series np.random.seed(12) random_series = utils.gen_rand(dtype, 100, low=10) # gpu series gs = Series(random_series) # class typing if obj_class == "Index": gs = as_index(gs) gs_result = gpu_func(gs) # class typing if obj_class == "Index": gs = Series(gs) # pandas ps_result = cpu_func(random_series) # verify np.testing.assert_allclose(ps_result, gs_result.to_numpy()) @pytest.mark.parametrize("binop", _binops) def test_different_shapes_and_columns(binop): # TODO: support `pow()` on NaN values. Particularly, the cases: # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1` if binop is operator.pow: return # Empty frame on the right side pd_frame = binop(pd.DataFrame({"x": [1, 2]}), pd.DataFrame({})) cd_frame = binop(cudf.DataFrame({"x": [1, 2]}), cudf.DataFrame({})) utils.assert_eq(cd_frame, pd_frame) # Empty frame on the left side pd_frame = pd.DataFrame({}) + pd.DataFrame({"x": [1, 2]}) cd_frame = cudf.DataFrame({}) + cudf.DataFrame({"x": [1, 2]}) utils.assert_eq(cd_frame, pd_frame) # Note: the below rely on a discrepancy between cudf and pandas # While pandas inserts columns in alphabetical order, cudf inserts in the # order of whichever column comes first. So the following code will not # work if the names of columns are reversed i.e. ('y', 'x') != ('x', 'y') # More rows on the left side pd_frame = pd.DataFrame({"x": [1, 2, 3]}) + pd.DataFrame({"y": [1, 2]}) cd_frame = cudf.DataFrame({"x": [1, 2, 3]}) + cudf.DataFrame({"y": [1, 2]}) utils.assert_eq(cd_frame, pd_frame) # More rows on the right side pd_frame = pd.DataFrame({"x": [1, 2]}) + pd.DataFrame({"y": [1, 2, 3]}) cd_frame = cudf.DataFrame({"x": [1, 2]}) + cudf.DataFrame({"y": [1, 2, 3]}) utils.assert_eq(cd_frame, pd_frame) @pytest.mark.parametrize("binop", _binops) def test_different_shapes_and_same_columns(binop): # TODO: support `pow()` on NaN values. Particularly, the cases: # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1` if binop is operator.pow: return pd_frame = binop( pd.DataFrame({"x": [1, 2]}), pd.DataFrame({"x": [1, 2, 3]}) ) cd_frame = binop( cudf.DataFrame({"x": [1, 2]}), cudf.DataFrame({"x": [1, 2, 3]}) ) # cast x as float64 so it matches pandas dtype cd_frame["x"] = cd_frame["x"].astype(np.float64) utils.assert_eq(cd_frame, pd_frame) @pytest.mark.parametrize("binop", _binops) def test_different_shapes_and_columns_with_unaligned_indices(binop): # TODO: support `pow()` on NaN values. 
Particularly, the cases: # `pow(1, NaN) == 1` and `pow(NaN, 0) == 1` if binop is operator.pow: return # Test with a RangeIndex pdf1 = pd.DataFrame({"x": [4, 3, 2, 1], "y": [7, 3, 8, 6]}) # Test with a GenericIndex pdf2 = pd.DataFrame( {"x": [1, 2, 3, 7], "y": [4, 5, 6, 7]}, index=[0, 1, 3, 4] ) # Test with a GenericIndex in a different order pdf3 = pd.DataFrame( {"x": [4, 5, 6, 7], "y": [1, 2, 3, 7], "z": [0, 5, 3, 7]}, index=[0, 3, 5, 3], ) gdf1 = cudf.DataFrame.from_pandas(pdf1) gdf2 = cudf.DataFrame.from_pandas(pdf2) gdf3 = cudf.DataFrame.from_pandas(pdf3) pd_frame = binop(binop(pdf1, pdf2), pdf3) cd_frame = binop(binop(gdf1, gdf2), gdf3) # cast x and y as float64 so it matches pandas dtype cd_frame["x"] = cd_frame["x"].astype(np.float64) cd_frame["y"] = cd_frame["y"].astype(np.float64) utils.assert_eq(cd_frame, pd_frame) pdf1 = pd.DataFrame({"x": [1, 1]}, index=["a", "a"]) pdf2 = pd.DataFrame({"x": [2]}, index=["a"]) gdf1 = cudf.DataFrame.from_pandas(pdf1) gdf2 = cudf.DataFrame.from_pandas(pdf2) pd_frame = binop(pdf1, pdf2) cd_frame = binop(gdf1, gdf2) utils.assert_eq(pd_frame, cd_frame) @pytest.mark.parametrize( "df2", [ cudf.DataFrame({"a": [3, 2, 1]}, index=[3, 2, 1]), cudf.DataFrame([3, 2]), ], ) @pytest.mark.parametrize("binop", [operator.eq, operator.ne]) def test_df_different_index_shape(df2, binop): df1 = cudf.DataFrame([1, 2, 3], index=[1, 2, 3]) pdf1 = df1.to_pandas() pdf2 = df2.to_pandas() utils.assert_exceptions_equal( lfunc=binop, rfunc=binop, lfunc_args_and_kwargs=([pdf1, pdf2],), rfunc_args_and_kwargs=([df1, df2],), ) @pytest.mark.parametrize("op", [operator.eq, operator.ne]) def test_boolean_scalar_binop(op): psr = pd.Series(np.random.choice([True, False], 10)) gsr = cudf.from_pandas(psr) utils.assert_eq(op(psr, True), op(gsr, True)) utils.assert_eq(op(psr, False), op(gsr, False)) # cuDF scalar utils.assert_eq(op(psr, True), op(gsr, cudf.Scalar(True))) utils.assert_eq(op(psr, False), op(gsr, cudf.Scalar(False))) @pytest.mark.parametrize("func", _operators_arithmetic) @pytest.mark.parametrize("has_nulls", [True, False]) @pytest.mark.parametrize("fill_value", [None, 27]) @pytest.mark.parametrize("dtype", ["float32", "float64"]) def test_operator_func_between_series(dtype, func, has_nulls, fill_value): count = 1000 gdf_series_a = utils.gen_rand_series( dtype, count, has_nulls=has_nulls, stride=10000 ) gdf_series_b = utils.gen_rand_series( dtype, count, has_nulls=has_nulls, stride=100 ) pdf_series_a = gdf_series_a.to_pandas() pdf_series_b = gdf_series_b.to_pandas() gdf_result = getattr(gdf_series_a, func)( gdf_series_b, fill_value=fill_value ) pdf_result = getattr(pdf_series_a, func)( pdf_series_b, fill_value=fill_value ) utils.assert_eq(pdf_result, gdf_result) @pytest.mark.parametrize("func", _operators_arithmetic) @pytest.mark.parametrize("has_nulls", [True, False]) @pytest.mark.parametrize("fill_value", [None, 27]) @pytest.mark.parametrize("dtype", ["float32", "float64"]) @pytest.mark.parametrize("use_cudf_scalar", [False, True]) def test_operator_func_series_and_scalar( dtype, func, has_nulls, fill_value, use_cudf_scalar ): count = 1000 scalar = 59 gdf_series = utils.gen_rand_series( dtype, count, has_nulls=has_nulls, stride=10000 ) pdf_series = gdf_series.to_pandas() gdf_series_result = getattr(gdf_series, func)( cudf.Scalar(scalar) if use_cudf_scalar else scalar, fill_value=fill_value, ) pdf_series_result = getattr(pdf_series, func)( scalar, fill_value=fill_value ) utils.assert_eq(pdf_series_result, gdf_series_result) _permu_values = [0, 1, None, np.nan] 
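
# Illustrative sketch (added for exposition; not an original test): the
# `fill_value` argument exercised in the tests around here substitutes
# for nulls in either operand before the operator runs, matching the
# pandas `Series.<op>(..., fill_value=...)` contract. `_fill_value_demo`
# is a hypothetical helper name.
def _fill_value_demo():
    import cudf

    a = cudf.Series([1.0, None, 3.0])
    b = cudf.Series([None, 2.0, 3.0])
    # A missing side is treated as 0.0; a row null on both sides would
    # stay null.
    out = a.add(b, fill_value=0.0)
    assert out.to_pandas().tolist() == [1.0, 2.0, 6.0]
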
@pytest.mark.parametrize("fill_value", _permu_values) @pytest.mark.parametrize("scalar_a", _permu_values) @pytest.mark.parametrize("scalar_b", _permu_values) @pytest.mark.parametrize("func", _operators_comparison) @pytest.mark.parametrize("dtype", ["float32", "float64"]) def test_operator_func_between_series_logical( dtype, func, scalar_a, scalar_b, fill_value ): gdf_series_a = Series([scalar_a], nan_as_null=False).astype(dtype) gdf_series_b = Series([scalar_b], nan_as_null=False).astype(dtype) pdf_series_a = gdf_series_a.to_pandas(nullable=True) pdf_series_b = gdf_series_b.to_pandas(nullable=True) gdf_series_result = getattr(gdf_series_a, func)( gdf_series_b, fill_value=fill_value ) pdf_series_result = getattr(pdf_series_a, func)( pdf_series_b, fill_value=fill_value ) expect = pdf_series_result got = gdf_series_result.to_pandas(nullable=True) # If fill_value is np.nan, things break down a bit, # because setting a NaN into a pandas nullable float # array still gets transformed to <NA>. As such, # pd_series_with_nulls.fillna(np.nan) has no effect. if ( (pdf_series_a.isnull().sum() != pdf_series_b.isnull().sum()) and np.isscalar(fill_value) and np.isnan(fill_value) ): with pytest.raises(AssertionError): utils.assert_eq(expect, got) return utils.assert_eq(expect, got) @pytest.mark.parametrize("dtype", ["float32", "float64"]) @pytest.mark.parametrize("func", _operators_comparison) @pytest.mark.parametrize("has_nulls", [True, False]) @pytest.mark.parametrize("scalar", [-59.0, np.nan, 0, 59.0]) @pytest.mark.parametrize("fill_value", [None, 1.0]) @pytest.mark.parametrize("use_cudf_scalar", [False, True]) def test_operator_func_series_and_scalar_logical( dtype, func, has_nulls, scalar, fill_value, use_cudf_scalar ): gdf_series = utils.gen_rand_series( dtype, 1000, has_nulls=has_nulls, stride=10000 ) pdf_series = gdf_series.to_pandas(nullable=True) gdf_series_result = getattr(gdf_series, func)( cudf.Scalar(scalar) if use_cudf_scalar else scalar, fill_value=fill_value, ) pdf_series_result = getattr(pdf_series, func)( scalar, fill_value=fill_value ) expect = pdf_series_result got = gdf_series_result.to_pandas(nullable=True) utils.assert_eq(expect, got) @pytest.mark.parametrize("func", _operators_arithmetic) @pytest.mark.parametrize("nulls", _nulls) @pytest.mark.parametrize("fill_value", [None, 27]) @pytest.mark.parametrize("other", ["df", "scalar"]) def test_operator_func_dataframe(func, nulls, fill_value, other): num_rows = 100 num_cols = 3 def gen_df(): pdf = pd.DataFrame() from string import ascii_lowercase cols = np.random.choice(num_cols + 5, num_cols, replace=False) for i in range(num_cols): colname = ascii_lowercase[cols[i]] data = utils.gen_rand("float64", num_rows) * 10000 if nulls == "some": idx = np.random.choice( num_rows, size=int(num_rows / 2), replace=False ) data[idx] = np.nan pdf[colname] = data return pdf pdf1 = gen_df() pdf2 = gen_df() if other == "df" else 59.0 gdf1 = cudf.DataFrame.from_pandas(pdf1) gdf2 = cudf.DataFrame.from_pandas(pdf2) if other == "df" else 59.0 got = getattr(gdf1, func)(gdf2, fill_value=fill_value) expect = getattr(pdf1, func)(pdf2, fill_value=fill_value)[list(got._data)] utils.assert_eq(expect, got) @pytest.mark.parametrize("func", _operators_comparison) @pytest.mark.parametrize("nulls", _nulls) @pytest.mark.parametrize("other", ["df", "scalar"]) def test_logical_operator_func_dataframe(func, nulls, other): np.random.seed(0) num_rows = 100 num_cols = 3 def gen_df(): pdf = pd.DataFrame() from string import ascii_lowercase cols = np.random.choice(num_cols + 5, 
num_cols, replace=False) for i in range(num_cols): colname = ascii_lowercase[cols[i]] data = utils.gen_rand("float64", num_rows) * 10000 if nulls == "some": idx = np.random.choice( num_rows, size=int(num_rows / 2), replace=False ) data[idx] = np.nan pdf[colname] = data return pdf pdf1 = gen_df() pdf2 = gen_df() if other == "df" else 59.0 gdf1 = cudf.DataFrame.from_pandas(pdf1) gdf2 = cudf.DataFrame.from_pandas(pdf2) if other == "df" else 59.0 got = getattr(gdf1, func)(gdf2) expect = getattr(pdf1, func)(pdf2)[list(got._data)] utils.assert_eq(expect, got) @pytest.mark.parametrize( "func", [op for op in _operators_arithmetic if op not in {"rmod", "rfloordiv"}] + _operators_comparison + [ pytest.param( "rmod", marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/12162" ), ), pytest.param( "rfloordiv", marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/12162" ), ), ], ) @pytest.mark.parametrize("rhs", [0, 1, 2, 128]) def test_binop_bool_uint(func, rhs): psr = pd.Series([True, False, False]) gsr = cudf.from_pandas(psr) utils.assert_eq( getattr(psr, func)(rhs), getattr(gsr, func)(rhs), check_dtype=False ) @pytest.mark.parametrize( "series_dtype", (np.int8, np.uint8, np.int64, np.uint64) ) @pytest.mark.parametrize( "divisor_dtype", ( np.int8, np.uint8, np.int64, np.uint64, ), ) @pytest.mark.parametrize("scalar_divisor", [False, True]) def test_floordiv_zero_float64(series_dtype, divisor_dtype, scalar_divisor): sr = pd.Series([1, 2, 3], dtype=series_dtype) cr = cudf.from_pandas(sr) if scalar_divisor: pd_div = divisor_dtype(0) cudf_div = cudf.Scalar(0, dtype=divisor_dtype) else: pd_div = pd.Series([0], dtype=divisor_dtype) cudf_div = cudf.from_pandas(pd_div) utils.assert_eq(sr // pd_div, cr // cudf_div) @pytest.mark.parametrize("scalar_divisor", [False, True]) @pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/12162") def test_floordiv_zero_bool(scalar_divisor): sr = pd.Series([True, True, False], dtype=np.bool_) cr = cudf.from_pandas(sr) if scalar_divisor: pd_div = np.bool_(0) cudf_div = cudf.Scalar(0, dtype=np.bool_) else: pd_div = pd.Series([0], dtype=np.bool_) cudf_div = cudf.from_pandas(pd_div) with pytest.raises((NotImplementedError, ZeroDivisionError)): # Pandas does raise sr // pd_div with pytest.raises((NotImplementedError, ZeroDivisionError)): # Cudf does not cr // cudf_div @pytest.mark.parametrize( "dtype", ( pytest.param( np.bool_, marks=pytest_xfail( reason=( "Pandas handling of division by zero-bool is too strange" ) ), ), np.int8, np.uint8, np.int64, np.uint64, np.float32, np.float64, ), ) def test_rmod_zero_nan(dtype): sr = pd.Series([1, 1, 0], dtype=dtype) cr = cudf.from_pandas(sr) utils.assert_eq(1 % sr, 1 % cr) expected_dtype = np.float64 if cr.dtype.kind != "f" else dtype utils.assert_eq(1 % cr, cudf.Series([0, 0, None], dtype=expected_dtype)) def test_series_misc_binop(): pds = pd.Series([1, 2, 4], name="abc xyz") gds = cudf.Series([1, 2, 4], name="abc xyz") utils.assert_eq(pds + 1, gds + 1) utils.assert_eq(1 + pds, 1 + gds) utils.assert_eq(pds + pds, gds + gds) pds1 = pd.Series([1, 2, 4], name="hello world") gds1 = cudf.Series([1, 2, 4], name="hello world") utils.assert_eq(pds + pds1, gds + gds1) utils.assert_eq(pds1 + pds, gds1 + gds) utils.assert_eq(pds1 + pds + 5, gds1 + gds + 5) def test_int8_float16_binop(): a = cudf.Series([1], dtype="int8") b = np.float16(2) expect = cudf.Series([0.5]) got = a / b utils.assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize("dtype", ["int64", "float64", "str"]) def 
test_vector_to_none_binops(dtype): data = Series([1, 2, 3, None], dtype=dtype) expect = Series([None] * 4).astype(dtype) got = data + None utils.assert_eq(expect, got) def dtype_scalar(val, dtype): if dtype == "str": return str(val) dtype = cudf.dtype(dtype) if dtype.type in {np.datetime64, np.timedelta64}: res, _ = np.datetime_data(dtype) return dtype.type(val, res) else: return dtype.type(val) def make_scalar_add_data(): valid = set() # to any int, we may add any kind of # other int, float, datetime timedelta, or bool valid |= set( product( INTEGER_TYPES, FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, ) ) # to any float, we may add any int, float, or bool valid |= set( product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES) ) # to any datetime, we may add any int, timedelta, or bool valid |= set( product(DATETIME_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES) ) # to any timedelta, we may add any int, datetime, other timedelta, or bool valid |= set( product(TIMEDELTA_TYPES, INTEGER_TYPES | DATETIME_TYPES | BOOL_TYPES) ) # to any bool, we may add any int, float, datetime, timedelta, or bool valid |= set( product( BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, ) ) # to any string, we may add any other string valid |= {("str", "str")} return sorted(list(valid)) def make_invalid_scalar_add_data(): invalid = set() # we can not add a datetime to a float invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES)) # We can not add a timedelta to a float invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES)) # we can not add a float to any datetime invalid |= set(product(DATETIME_TYPES, FLOAT_TYPES)) # can can not add a datetime to a datetime invalid |= set(product(DATETIME_TYPES, DATETIME_TYPES)) # can not add a timedelta to a float invalid |= set(product(FLOAT_TYPES, TIMEDELTA_TYPES)) return sorted(list(invalid)) @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_add_data()) def test_scalar_add(dtype_l, dtype_r): test_value = 1 lval_host = dtype_scalar(test_value, dtype=dtype_l) rval_host = dtype_scalar(test_value, dtype=dtype_r) lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) # expect = np.add(lval_host, rval_host) expect = lval_host + rval_host got = lval_gpu + rval_gpu assert expect == got.value if not dtype_l == dtype_r == "str": assert expect.dtype == got.dtype @pytest.mark.parametrize("dtype_l,dtype_r", make_invalid_scalar_add_data()) def test_scalar_add_invalid(dtype_l, dtype_r): test_value = 1 lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) with pytest.raises(TypeError): lval_gpu + rval_gpu def make_scalar_difference_data(): valid = set() # from an int, we may subtract any int, float, timedelta, # or boolean value valid |= set( product( INTEGER_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, ) ) # from any float, we may subtract any int, float, or bool valid |= set( product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES) ) # from any datetime we may subtract any int, datetime, timedelta, or bool valid |= set( product( DATETIME_TYPES, INTEGER_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, ) ) # from any timedelta we may subtract any int, timedelta, or bool valid |= set( product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES | BOOL_TYPES) ) # from any bool we may subtract any int, float or timedelta valid |= set( product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES) ) return 
sorted(list(valid)) def make_scalar_difference_data_invalid(): invalid = set() # we can't subtract a datetime from an int invalid |= set(product(INTEGER_TYPES, DATETIME_TYPES)) # we can't subtract a datetime or timedelta from a float invalid |= set(product(FLOAT_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES)) # we can't subtract a float from a datetime or timedelta invalid |= set(product(DATETIME_TYPES | TIMEDELTA_TYPES, FLOAT_TYPES)) # We can't subtract a datetime from a timedelta invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES)) # we can't subtract a datetime or bool from a bool invalid |= set(product(BOOL_TYPES, BOOL_TYPES | DATETIME_TYPES)) return sorted(list(invalid)) @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_difference_data()) def test_scalar_difference(dtype_l, dtype_r): test_value = 1 lval_host = dtype_scalar(test_value, dtype=dtype_l) rval_host = dtype_scalar(test_value, dtype=dtype_r) lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) expect = lval_host - rval_host got = lval_gpu - rval_gpu assert expect == got.value assert expect.dtype == got.dtype @pytest.mark.parametrize( "dtype_l,dtype_r", make_scalar_difference_data_invalid() ) def test_scalar_difference_invalid(dtype_l, dtype_r): test_value = 1 lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) with pytest.raises(TypeError): lval_gpu - rval_gpu def make_scalar_product_data(): valid = set() # we can multiply an int, or bool by any int, float, timedelta, or bool valid |= set( product( INTEGER_TYPES | BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, ) ) # we can multiply any timedelta by any int, or bool valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | BOOL_TYPES)) # we can multiply a float by any int, float, or bool valid |= set( product(FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES) ) return sorted(list(valid)) def make_scalar_product_data_invalid(): invalid = set() # can't multiply a ints, floats, datetimes, timedeltas, # or bools by datetimes invalid |= set( product( INTEGER_TYPES | FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, DATETIME_TYPES, ) ) # can't multiply datetimes with anything really invalid |= set( product( DATETIME_TYPES, INTEGER_TYPES | FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, ) ) # can't multiply timedeltas by timedeltas invalid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES)) return sorted(list(invalid)) @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_product_data()) def test_scalar_product(dtype_l, dtype_r): test_value = 1 lval_host = dtype_scalar(test_value, dtype=dtype_l) rval_host = dtype_scalar(test_value, dtype=dtype_r) lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) expect = lval_host * rval_host got = lval_gpu * rval_gpu assert expect == got.value assert expect.dtype == got.dtype @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_product_data_invalid()) def test_scalar_product_invalid(dtype_l, dtype_r): test_value = 1 lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) with pytest.raises(TypeError): lval_gpu * rval_gpu def make_scalar_floordiv_data(): valid = set() # we can divide ints and floats by other ints, floats, or bools valid |= set( product( INTEGER_TYPES | FLOAT_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, ) ) # we can divide timedeltas by ints, floats or other timedeltas valid |= set( 
product(TIMEDELTA_TYPES, INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES) ) # we can divide bools by ints, floats or bools valid |= set(product(BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES)) return sorted(list(valid)) def make_scalar_floordiv_data_invalid(): invalid = set() # we can't numeric types into datelike types invalid |= set( product( INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES, ) ) # we can't divide datetime types into anything invalid |= set( product( DATETIME_TYPES, INTEGER_TYPES | FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, ) ) # we can't divide timedeltas into bools, or datetimes invalid |= set(product(TIMEDELTA_TYPES, BOOL_TYPES | DATETIME_TYPES)) return sorted(list(invalid)) @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_floordiv_data()) def test_scalar_floordiv(dtype_l, dtype_r): test_value = 1 lval_host = dtype_scalar(test_value, dtype=dtype_l) rval_host = dtype_scalar(test_value, dtype=dtype_r) lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) expect = lval_host // rval_host got = lval_gpu // rval_gpu assert expect == got.value assert expect.dtype == got.dtype @pytest.mark.parametrize( "dtype_l,dtype_r", make_scalar_floordiv_data_invalid() ) def test_scalar_floordiv_invalid(dtype_l, dtype_r): test_value = 1 lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) with pytest.raises(TypeError): lval_gpu // rval_gpu def make_scalar_truediv_data(): valid = set() # we can true divide ints, floats, or bools by other # ints, floats or bools valid |= set( product( INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, ) ) # we can true divide timedeltas by ints floats or timedeltas valid |= set(product(TIMEDELTA_TYPES, INTEGER_TYPES | TIMEDELTA_TYPES)) return sorted(list(valid)) def make_scalar_truediv_data_invalid(): invalid = set() # we can't divide ints, floats or bools by datetimes # or timedeltas invalid |= set( product( INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES, ) ) # we cant true divide datetime types by anything invalid |= set( product( DATETIME_TYPES, INTEGER_TYPES | FLOAT_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | BOOL_TYPES, ) ) # we cant true divide timedeltas by datetimes or bools or floats invalid |= set( product(TIMEDELTA_TYPES, DATETIME_TYPES | BOOL_TYPES | FLOAT_TYPES) ) return sorted(list(invalid)) @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_truediv_data()) def test_scalar_truediv(dtype_l, dtype_r): test_value = 1 lval_host = dtype_scalar(test_value, dtype=dtype_l) rval_host = dtype_scalar(test_value, dtype=dtype_r) lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) expect = np.true_divide(lval_host, rval_host) got = lval_gpu / rval_gpu assert expect == got.value # numpy bug if np.dtype(dtype_l).itemsize <= 2 and np.dtype(dtype_r).itemsize <= 2: assert expect.dtype == "float64" and got.dtype == "float32" else: assert expect.dtype == got.dtype # assert expect.dtype == got.dtype @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_truediv_data_invalid()) def test_scalar_truediv_invalid(dtype_l, dtype_r): test_value = 1 lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) with pytest.raises(TypeError): lval_gpu / rval_gpu def make_scalar_remainder_data(): valid = set() # can mod numeric types with each other valid |= set( product( 
INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, ) ) # can mod timedeltas by other timedeltas valid |= set(product(TIMEDELTA_TYPES, TIMEDELTA_TYPES)) return sorted(list(valid)) def make_scalar_remainder_data_invalid(): invalid = set() # numeric types cant be modded against timedeltas # or datetimes. Also, datetimes can't be modded # against datetimes or timedeltas invalid |= set( product( INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES | DATETIME_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES, ) ) # datetime and timedelta types cant be modded against # any numeric types invalid |= set( product( DATETIME_TYPES | TIMEDELTA_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, ) ) # timedeltas cant mod with datetimes invalid |= set(product(TIMEDELTA_TYPES, DATETIME_TYPES)) return sorted(list(invalid)) @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_remainder_data()) def test_scalar_remainder(dtype_l, dtype_r): test_value = 1 lval_host = dtype_scalar(test_value, dtype=dtype_l) rval_host = dtype_scalar(test_value, dtype=dtype_r) lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) expect = lval_host % rval_host got = lval_gpu % rval_gpu assert expect == got.value assert expect.dtype == got.dtype @pytest.mark.parametrize( "dtype_l,dtype_r", make_scalar_remainder_data_invalid() ) def test_scalar_remainder_invalid(dtype_l, dtype_r): test_value = 1 lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) with pytest.raises(TypeError): lval_gpu % rval_gpu def make_scalar_power_data(): # only numeric values form valid operands for power return sorted( product( INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, ) ) def make_scalar_power_data_invalid(): invalid = set() # datetimes and timedeltas cant go in exponents invalid |= set( product( INTEGER_TYPES | FLOAT_TYPES | TIMEDELTA_TYPES | DATETIME_TYPES | BOOL_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES, ) ) # datetimes and timedeltas may not be raised to # any exponent of any dtype invalid |= set( product( DATETIME_TYPES | TIMEDELTA_TYPES, DATETIME_TYPES | TIMEDELTA_TYPES | INTEGER_TYPES | FLOAT_TYPES | BOOL_TYPES, ) ) return sorted(list(invalid)) @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_power_data()) def test_scalar_power(dtype_l, dtype_r): test_value = 1 lval_host = dtype_scalar(test_value, dtype=dtype_l) rval_host = dtype_scalar(test_value, dtype=dtype_r) lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) expect = lval_host**rval_host got = lval_gpu**rval_gpu assert expect == got.value assert expect.dtype == got.dtype @pytest.mark.parametrize("dtype_l,dtype_r", make_scalar_power_data_invalid()) def test_scalar_power_invalid(dtype_l, dtype_r): test_value = 1 lval_gpu = cudf.Scalar(test_value, dtype=dtype_l) rval_gpu = cudf.Scalar(test_value, dtype=dtype_r) with pytest.raises(TypeError): lval_gpu**rval_gpu def make_scalar_null_binops_data(): return ( [(operator.add, *dtypes) for dtypes in make_scalar_add_data()] + [(operator.sub, *dtypes) for dtypes in make_scalar_difference_data()] + [(operator.mul, *dtypes) for dtypes in make_scalar_product_data()] + [(operator.add, *dtypes) for dtypes in make_scalar_add_data()] + [ (operator.floordiv, *dtypes) for dtypes in make_scalar_floordiv_data() ] + [ (operator.truediv, *dtypes) for dtypes in make_scalar_truediv_data() ] + [(operator.mod, *dtypes) for dtypes in make_scalar_remainder_data()] + 
[(operator.pow, *dtypes) for dtypes in make_scalar_power_data()] ) @pytest.mark.parametrize("op,dtype_l,dtype_r", make_scalar_null_binops_data()) def test_scalar_null_binops(op, dtype_l, dtype_r): lhs = cudf.Scalar(cudf.NA, dtype=dtype_l) rhs = cudf.Scalar(cudf.NA, dtype=dtype_r) result = op(lhs, rhs) assert result.value is ( cudf.NaT if cudf.api.types.is_datetime64_dtype(result.dtype) or cudf.api.types.is_timedelta64_dtype(result.dtype) else cudf.NA ) # make sure dtype is the same as had there been a valid scalar valid_lhs = cudf.Scalar(1, dtype=dtype_l) valid_rhs = cudf.Scalar(1, dtype=dtype_r) valid_result = op(valid_lhs, valid_rhs) assert result.dtype == valid_result.dtype @pytest.mark.parametrize( "date_col", [ [ "2000-01-01 00:00:00.012345678", "2000-01-31 00:00:00.012345678", "2000-02-29 00:00:00.012345678", ] ], ) @pytest.mark.parametrize("n_periods", [0, 1, -1, 12, -12]) @pytest.mark.parametrize( "frequency", [ "months", "years", "days", "hours", "minutes", "seconds", "microseconds", pytest.param( "nanoseconds", marks=pytest_xfail( condition=not PANDAS_GE_150, reason="https://github.com/pandas-dev/pandas/issues/36589", ), ), ], ) @pytest.mark.parametrize( "dtype", ["datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]"], ) @pytest.mark.parametrize("op", [operator.add, operator.sub]) def test_datetime_dateoffset_binaryop( date_col, n_periods, frequency, dtype, op ): gsr = cudf.Series(date_col, dtype=dtype) psr = gsr.to_pandas() # converts to nanos kwargs = {frequency: n_periods} goffset = cudf.DateOffset(**kwargs) poffset = pd.DateOffset(**kwargs) expect = op(psr, poffset) got = op(gsr, goffset) utils.assert_eq(expect, got) expect = op(psr, -poffset) got = op(gsr, -goffset) utils.assert_eq(expect, got) @pytest.mark.parametrize( "date_col", [ [ "2000-01-01 00:00:00.012345678", "2000-01-31 00:00:00.012345678", "2000-02-29 00:00:00.012345678", ] ], ) @pytest.mark.parametrize( "kwargs", [ {"months": 2, "years": 5}, {"microseconds": 1, "seconds": 1}, {"months": 2, "years": 5, "seconds": 923, "microseconds": 481}, pytest.param( {"milliseconds": 4}, marks=pytest.mark.xfail( condition=not PANDAS_GE_150, reason="Pandas gets the wrong answer for milliseconds", ), ), pytest.param( {"milliseconds": 4, "years": 2}, marks=pytest_xfail( reason="https://github.com/pandas-dev/pandas/issues/49897" ), ), pytest.param( {"nanoseconds": 12}, marks=pytest.mark.xfail( condition=not PANDAS_GE_150, reason="Pandas gets the wrong answer for nanoseconds", ), ), {"nanoseconds": 12}, ], ) @pytest.mark.parametrize("op", [operator.add, operator.sub]) def test_datetime_dateoffset_binaryop_multiple(date_col, kwargs, op): gsr = cudf.Series(date_col, dtype="datetime64[ns]") psr = gsr.to_pandas() poffset = pd.DateOffset(**kwargs) goffset = cudf.DateOffset(**kwargs) expect = op(psr, poffset) got = op(gsr, goffset) utils.assert_eq(expect, got) @pytest.mark.parametrize( "date_col", [ [ "2000-01-01 00:00:00.012345678", "2000-01-31 00:00:00.012345678", "2000-02-29 00:00:00.012345678", ] ], ) @pytest.mark.parametrize("n_periods", [0, 1, -1, 12, -12]) @pytest.mark.parametrize( "frequency", [ "months", "years", "days", "hours", "minutes", "seconds", "microseconds", pytest.param( "nanoseconds", marks=pytest_xfail( condition=not PANDAS_GE_150, reason="https://github.com/pandas-dev/pandas/issues/36589", ), ), ], ) @pytest.mark.parametrize( "dtype", ["datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]"], ) def test_datetime_dateoffset_binaryop_reflected( date_col, n_periods, frequency, dtype ): gsr 
= cudf.Series(date_col, dtype=dtype) psr = gsr.to_pandas() # converts to nanos kwargs = {frequency: n_periods} goffset = cudf.DateOffset(**kwargs) poffset = pd.DateOffset(**kwargs) expect = poffset + psr got = goffset + gsr utils.assert_eq(expect, got) with pytest.raises(TypeError): poffset - psr with pytest.raises(TypeError): goffset - gsr @pytest.mark.parametrize("frame", [cudf.Series, cudf.Index, cudf.DataFrame]) @pytest.mark.parametrize( "dtype", ["int", "str", "datetime64[s]", "timedelta64[s]", "category"] ) def test_binops_with_lhs_numpy_scalar(frame, dtype): data = [1, 2, 3, 4, 5] data = ( frame({"a": data}, dtype=dtype) if isinstance(frame, cudf.DataFrame) else frame(data, dtype=dtype) ) if dtype == "datetime64[s]": val = cudf.dtype(dtype).type(4, "s") elif dtype == "timedelta64[s]": val = cudf.dtype(dtype).type(4, "s") elif dtype == "category": val = np.int64(4) elif dtype == "str": val = str(4) else: val = cudf.dtype(dtype).type(4) # Compare equality with series on left side to dispatch to the pandas/cudf # __eq__ operator and avoid a DeprecationWarning from numpy. expected = data.to_pandas() == val got = data == val utils.assert_eq(expected, got) @pytest.mark.parametrize( "dtype", [ "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "float32", "float64", "datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]", "timedelta64[ns]", "timedelta64[us]", "timedelta64[ms]", "timedelta64[s]", ], ) @pytest.mark.parametrize("op", _operators_comparison) def test_binops_with_NA_consistent(dtype, op): data = [1, 2, 3] sr = cudf.Series(data, dtype=dtype) result = getattr(sr, op)(cudf.NA) if dtype in NUMERIC_TYPES: if op == "ne": expect_all = True else: expect_all = False assert (result == expect_all).all() elif dtype in DATETIME_TYPES & TIMEDELTA_TYPES: assert result._column.null_count == len(data) @pytest.mark.parametrize( "op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype", [ ( operator.add, ["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=3), ["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=3), ["3.0", "4.0"], cudf.Decimal64Dtype(scale=2, precision=4), ), ( operator.add, 2, cudf.Decimal64Dtype(scale=2, precision=3), ["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=3), ["3.5", "4.0"], cudf.Decimal64Dtype(scale=2, precision=4), ), ( operator.add, ["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=3), ["2.25", "1.005"], cudf.Decimal64Dtype(scale=3, precision=4), ["3.75", "3.005"], cudf.Decimal64Dtype(scale=3, precision=5), ), ( operator.add, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=17), ["0.1", "0.2"], cudf.Decimal64Dtype(scale=3, precision=4), ["100.1", "200.2"], cudf.Decimal128Dtype(scale=3, precision=23), ), ( operator.sub, ["1.5", "2.0"], cudf.Decimal64Dtype(scale=1, precision=2), ["2.25", "1.005"], cudf.Decimal64Dtype(scale=3, precision=4), ["-0.75", "0.995"], cudf.Decimal64Dtype(scale=3, precision=5), ), ( operator.sub, ["1.5", "2.0"], cudf.Decimal64Dtype(scale=1, precision=2), ["2.25", "1.005"], cudf.Decimal64Dtype(scale=3, precision=4), ["-0.75", "0.995"], cudf.Decimal64Dtype(scale=3, precision=5), ), ( operator.sub, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=10), ["0.1", "0.2"], cudf.Decimal64Dtype(scale=6, precision=10), ["99.9", "199.8"], cudf.Decimal128Dtype(scale=6, precision=19), ), ( operator.sub, 2, cudf.Decimal64Dtype(scale=3, precision=4), ["2.25", "1.005"], cudf.Decimal64Dtype(scale=3, precision=4), ["-0.25", "0.995"], cudf.Decimal64Dtype(scale=3, precision=5), ), ( operator.mul, 
["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=3), ["1.5", "3.0"], cudf.Decimal64Dtype(scale=3, precision=4), ["2.25", "6.0"], cudf.Decimal64Dtype(scale=5, precision=8), ), ( operator.mul, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), ["0.1", "0.2"], cudf.Decimal64Dtype(scale=3, precision=4), ["10.0", "40.0"], cudf.Decimal64Dtype(scale=1, precision=8), ), ( operator.mul, ["1000", "2000"], cudf.Decimal64Dtype(scale=-3, precision=4), ["0.343", "0.500"], cudf.Decimal64Dtype(scale=3, precision=3), ["343.0", "1000.0"], cudf.Decimal64Dtype(scale=0, precision=8), ), ( operator.mul, 200, cudf.Decimal64Dtype(scale=3, precision=6), ["0.343", "0.500"], cudf.Decimal64Dtype(scale=3, precision=6), ["68.60", "100.0"], cudf.Decimal64Dtype(scale=6, precision=13), ), ( operator.truediv, ["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=4), ["1.5", "3.0"], cudf.Decimal64Dtype(scale=1, precision=4), ["1.0", "0.6"], cudf.Decimal64Dtype(scale=7, precision=10), ), ( operator.truediv, ["110", "200"], cudf.Decimal64Dtype(scale=-1, precision=3), ["0.1", "0.2"], cudf.Decimal64Dtype(scale=2, precision=4), ["1000.0", "1000.0"], cudf.Decimal64Dtype(scale=6, precision=12), ), ( operator.truediv, ["132.86", "15.25"], cudf.Decimal64Dtype(scale=4, precision=14), ["2.34", "8.50"], cudf.Decimal64Dtype(scale=2, precision=8), ["56.77", "1.79"], cudf.Decimal128Dtype(scale=13, precision=25), ), ( operator.truediv, 20, cudf.Decimal128Dtype(scale=2, precision=6), ["20", "20"], cudf.Decimal128Dtype(scale=2, precision=6), ["1.0", "1.0"], cudf.Decimal128Dtype(scale=9, precision=15), ), ( operator.add, ["1.5", None, "2.0"], cudf.Decimal64Dtype(scale=1, precision=2), ["1.5", None, "2.0"], cudf.Decimal64Dtype(scale=1, precision=2), ["3.0", None, "4.0"], cudf.Decimal64Dtype(scale=1, precision=3), ), ( operator.add, ["1.5", None], cudf.Decimal64Dtype(scale=2, precision=3), ["2.25", "1.005"], cudf.Decimal64Dtype(scale=3, precision=4), ["3.75", None], cudf.Decimal64Dtype(scale=3, precision=5), ), ( operator.sub, ["1.5", None], cudf.Decimal64Dtype(scale=2, precision=3), ["2.25", None], cudf.Decimal64Dtype(scale=3, precision=4), ["-0.75", None], cudf.Decimal64Dtype(scale=3, precision=5), ), ( operator.sub, ["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=3), ["2.25", None], cudf.Decimal64Dtype(scale=3, precision=4), ["-0.75", None], cudf.Decimal64Dtype(scale=3, precision=5), ), ( operator.mul, ["1.5", None], cudf.Decimal64Dtype(scale=2, precision=3), ["1.5", None], cudf.Decimal64Dtype(scale=3, precision=4), ["2.25", None], cudf.Decimal64Dtype(scale=5, precision=8), ), ( operator.mul, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=10), ["0.1", None], cudf.Decimal64Dtype(scale=3, precision=12), ["10.0", None], cudf.Decimal128Dtype(scale=1, precision=23), ), ( operator.eq, ["0.18", "0.42"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.18", "0.21"], cudf.Decimal64Dtype(scale=2, precision=3), [True, False], bool, ), ( operator.eq, ["0.18", "0.42"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.1800", "0.2100"], cudf.Decimal64Dtype(scale=4, precision=5), [True, False], bool, ), ( operator.eq, ["100", None], cudf.Decimal64Dtype(scale=-2, precision=3), ["100", "200"], cudf.Decimal64Dtype(scale=-1, precision=4), [True, None], bool, ), ( operator.ne, ["0.06", "0.42"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.18", "0.42"], cudf.Decimal64Dtype(scale=2, precision=3), [True, False], bool, ), ( operator.ne, ["1.33", "1.21"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.1899", "1.21"], 
cudf.Decimal64Dtype(scale=4, precision=5), [True, False], bool, ), ( operator.ne, ["300", None], cudf.Decimal64Dtype(scale=-2, precision=3), ["110", "5500"], cudf.Decimal64Dtype(scale=-1, precision=4), [True, None], bool, ), ( operator.lt, ["0.18", "0.42", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.10", "0.87", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), [False, True, False], bool, ), ( operator.lt, ["0.18", "0.42", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.1000", "0.8700", "1.0000"], cudf.Decimal64Dtype(scale=4, precision=5), [False, True, False], bool, ), ( operator.lt, ["200", None, "100"], cudf.Decimal64Dtype(scale=-2, precision=3), ["100", "200", "100"], cudf.Decimal64Dtype(scale=-1, precision=4), [False, None, False], bool, ), ( operator.gt, ["0.18", "0.42", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.10", "0.87", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), [True, False, False], bool, ), ( operator.gt, ["0.18", "0.42", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.1000", "0.8700", "1.0000"], cudf.Decimal64Dtype(scale=4, precision=5), [True, False, False], bool, ), ( operator.gt, ["300", None, "100"], cudf.Decimal64Dtype(scale=-2, precision=3), ["100", "200", "100"], cudf.Decimal64Dtype(scale=-1, precision=4), [True, None, False], bool, ), ( operator.le, ["0.18", "0.42", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.10", "0.87", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), [False, True, True], bool, ), ( operator.le, ["0.18", "0.42", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.1000", "0.8700", "1.0000"], cudf.Decimal64Dtype(scale=4, precision=5), [False, True, True], bool, ), ( operator.le, ["300", None, "100"], cudf.Decimal64Dtype(scale=-2, precision=3), ["100", "200", "100"], cudf.Decimal64Dtype(scale=-1, precision=4), [False, None, True], bool, ), ( operator.ge, ["0.18", "0.42", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.10", "0.87", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), [True, False, True], bool, ), ( operator.ge, ["0.18", "0.42", "1.00"], cudf.Decimal64Dtype(scale=2, precision=3), ["0.1000", "0.8700", "1.0000"], cudf.Decimal64Dtype(scale=4, precision=5), [True, False, True], bool, ), ( operator.ge, ["300", None, "100"], cudf.Decimal64Dtype(scale=-2, precision=3), ["100", "200", "100"], cudf.Decimal64Dtype(scale=-1, precision=4), [True, None, True], bool, ), ], ) def test_binops_decimal(op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype): if isinstance(lhs, (int, float)): a = cudf.Scalar(lhs, l_dtype) else: a = utils._decimal_series(lhs, l_dtype) b = utils._decimal_series(rhs, r_dtype) expect = ( utils._decimal_series(expect, expect_dtype) if isinstance( expect_dtype, (cudf.Decimal64Dtype, cudf.Decimal32Dtype, cudf.Decimal128Dtype), ) else cudf.Series(expect, dtype=expect_dtype) ) got = op(a, b) assert expect.dtype == got.dtype utils.assert_eq(expect, got) @pytest.mark.parametrize( "op,lhs,l_dtype,rhs,r_dtype,expect,expect_dtype", [ ( "radd", ["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=3), ["1.5", "2.0"], cudf.Decimal64Dtype(scale=2, precision=3), ["3.0", "4.0"], cudf.Decimal64Dtype(scale=2, precision=4), ), ( "rsub", ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=10), ["0.1", "0.2"], cudf.Decimal64Dtype(scale=6, precision=10), ["-99.9", "-199.8"], cudf.Decimal128Dtype(scale=6, precision=19), ), ( "rmul", ["1000", "2000"], cudf.Decimal64Dtype(scale=-3, precision=4), ["0.343", "0.500"], cudf.Decimal64Dtype(scale=3, precision=3), ["343.0", 
"1000.0"], cudf.Decimal64Dtype(scale=0, precision=8), ), ( "rtruediv", ["1.5", "0.5"], cudf.Decimal64Dtype(scale=3, precision=6), ["1.5", "2.0"], cudf.Decimal64Dtype(scale=3, precision=6), ["1.0", "4.0"], cudf.Decimal64Dtype(scale=10, precision=16), ), ], ) def test_binops_reflect_decimal( op, lhs, l_dtype, rhs, r_dtype, expect, expect_dtype ): a = utils._decimal_series(lhs, l_dtype) b = utils._decimal_series(rhs, r_dtype) expect = utils._decimal_series(expect, expect_dtype) got = getattr(a, op)(b) assert expect.dtype == got.dtype utils.assert_eq(expect, got) @pytest.mark.parametrize("powers", [0, 1, 2, 3]) def test_binops_decimal_pow(powers): s = cudf.Series( [ decimal.Decimal("1.324324"), None, decimal.Decimal("2"), decimal.Decimal("3"), decimal.Decimal("5"), ] ) ps = s.to_pandas() utils.assert_eq(s**powers, ps**powers, check_dtype=False) def test_binops_raise_error(): s = cudf.Series([decimal.Decimal("1.324324")]) with pytest.raises(TypeError): s // 1 @pytest.mark.parametrize( "args", [ ( operator.eq, ["100", "41", None], cudf.Decimal64Dtype(scale=0, precision=5), [100, 42, 12], cudf.Series([True, False, None], dtype=bool), cudf.Series([True, False, None], dtype=bool), ), ( operator.eq, ["100.000", "42.001", None], cudf.Decimal64Dtype(scale=3, precision=6), [100, 42, 12], cudf.Series([True, False, None], dtype=bool), cudf.Series([True, False, None], dtype=bool), ), ( operator.eq, ["100", "40", None], cudf.Decimal64Dtype(scale=-1, precision=3), [100, 42, 12], cudf.Series([True, False, None], dtype=bool), cudf.Series([True, False, None], dtype=bool), ), ( operator.ne, ["100", "42", "24", None], cudf.Decimal64Dtype(scale=0, precision=3), [100, 40, 24, 12], cudf.Series([False, True, False, None], dtype=bool), cudf.Series([False, True, False, None], dtype=bool), ), ( operator.ne, ["10.1", "88", "11", None], cudf.Decimal64Dtype(scale=1, precision=3), [10, 42, 11, 12], cudf.Series([True, True, False, None], dtype=bool), cudf.Series([True, True, False, None], dtype=bool), ), ( operator.ne, ["100.000", "42", "23.999", None], cudf.Decimal64Dtype(scale=3, precision=6), [100, 42, 24, 12], cudf.Series([False, False, True, None], dtype=bool), cudf.Series([False, False, True, None], dtype=bool), ), ( operator.lt, ["100", "40", "28", None], cudf.Decimal64Dtype(scale=0, precision=3), [100, 42, 24, 12], cudf.Series([False, True, False, None], dtype=bool), cudf.Series([False, False, True, None], dtype=bool), ), ( operator.lt, ["100.000", "42.002", "23.999", None], cudf.Decimal64Dtype(scale=3, precision=6), [100, 42, 24, 12], cudf.Series([False, False, True, None], dtype=bool), cudf.Series([False, True, False, None], dtype=bool), ), ( operator.lt, ["100", "40", "10", None], cudf.Decimal64Dtype(scale=-1, precision=3), [100, 42, 8, 12], cudf.Series([False, True, False, None], dtype=bool), cudf.Series([False, False, True, None], dtype=bool), ), ( operator.gt, ["100", "42", "20", None], cudf.Decimal64Dtype(scale=0, precision=3), [100, 40, 24, 12], cudf.Series([False, True, False, None], dtype=bool), cudf.Series([False, False, True, None], dtype=bool), ), ( operator.gt, ["100.000", "42.002", "23.999", None], cudf.Decimal64Dtype(scale=3, precision=6), [100, 42, 24, 12], cudf.Series([False, True, False, None], dtype=bool), cudf.Series([False, False, True, None], dtype=bool), ), ( operator.gt, ["100", "40", "10", None], cudf.Decimal64Dtype(scale=-1, precision=3), [100, 42, 8, 12], cudf.Series([False, False, True, None], dtype=bool), cudf.Series([False, True, False, None], dtype=bool), ), ( operator.le, ["100", 
"40", "28", None], cudf.Decimal64Dtype(scale=0, precision=3), [100, 42, 24, 12], cudf.Series([True, True, False, None], dtype=bool), cudf.Series([True, False, True, None], dtype=bool), ), ( operator.le, ["100.000", "42.002", "23.999", None], cudf.Decimal64Dtype(scale=3, precision=6), [100, 42, 24, 12], cudf.Series([True, False, True, None], dtype=bool), cudf.Series([True, True, False, None], dtype=bool), ), ( operator.le, ["100", "40", "10", None], cudf.Decimal64Dtype(scale=-1, precision=3), [100, 42, 8, 12], cudf.Series([True, True, False, None], dtype=bool), cudf.Series([True, False, True, None], dtype=bool), ), ( operator.ge, ["100", "42", "20", None], cudf.Decimal64Dtype(scale=0, precision=3), [100, 40, 24, 12], cudf.Series([True, True, False, None], dtype=bool), cudf.Series([True, False, True, None], dtype=bool), ), ( operator.ge, ["100.000", "42.002", "23.999", None], cudf.Decimal64Dtype(scale=3, precision=6), [100, 42, 24, 12], cudf.Series([True, True, False, None], dtype=bool), cudf.Series([True, False, True, None], dtype=bool), ), ( operator.ge, ["100", "40", "10", None], cudf.Decimal64Dtype(scale=-1, precision=3), [100, 42, 8, 12], cudf.Series([True, False, True, None], dtype=bool), cudf.Series([True, True, False, None], dtype=bool), ), ], ) @pytest.mark.parametrize("integer_dtype", utils.INTEGER_TYPES) @pytest.mark.parametrize("reflected", [True, False]) def test_binops_decimal_comp_mixed_integer(args, integer_dtype, reflected): """ Tested compare operations: eq, lt, gt, le, ge Each operation has 3 decimal data setups, with scale from {==0, >0, <0}. Decimal precisions are sufficient to hold the digits. For each decimal data setup, there is at least one row that lead to one of the following compare results: {True, False, None}. """ if not reflected: op, ldata, ldtype, rdata, expected, _ = args else: op, ldata, ldtype, rdata, _, expected = args lhs = utils._decimal_series(ldata, ldtype) rhs = cudf.Series(rdata, dtype=integer_dtype) if reflected: rhs, lhs = lhs, rhs actual = op(lhs, rhs) utils.assert_eq(expected, actual) @pytest.mark.parametrize( "args", [ ( operator.add, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal(1), ["101", "201"], cudf.Decimal64Dtype(scale=0, precision=6), False, ), ( operator.add, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), 1, ["101", "201"], cudf.Decimal64Dtype(scale=0, precision=6), False, ), ( operator.add, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal("1.5"), ["101.5", "201.5"], cudf.Decimal64Dtype(scale=1, precision=7), False, ), ( operator.add, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal(1), ["101", "201"], cudf.Decimal64Dtype(scale=0, precision=6), True, ), ( operator.add, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), 1, ["101", "201"], cudf.Decimal64Dtype(scale=0, precision=6), True, ), ( operator.add, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal("1.5"), ["101.5", "201.5"], cudf.Decimal64Dtype(scale=1, precision=7), True, ), ( operator.mul, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), 1, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=5), False, ), ( operator.mul, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal(2), ["200", "400"], cudf.Decimal64Dtype(scale=-2, precision=5), False, ), ( operator.mul, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal("1.5"), ["150", "300"], cudf.Decimal64Dtype(scale=-1, precision=6), False, ), 
( operator.mul, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), 1, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=5), True, ), ( operator.mul, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal(2), ["200", "400"], cudf.Decimal64Dtype(scale=-2, precision=5), True, ), ( operator.mul, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal("1.5"), ["150", "300"], cudf.Decimal64Dtype(scale=-1, precision=6), True, ), ( operator.truediv, ["1000", "2000"], cudf.Decimal64Dtype(scale=-2, precision=4), 1, ["1000", "2000"], cudf.Decimal64Dtype(scale=6, precision=12), False, ), ( operator.truediv, ["100", "200"], cudf.Decimal64Dtype(scale=2, precision=5), decimal.Decimal(2), ["50", "100"], cudf.Decimal64Dtype(scale=6, precision=9), False, ), ( operator.truediv, ["35.23", "54.91"], cudf.Decimal64Dtype(scale=2, precision=4), decimal.Decimal("1.5"), ["23.4", "36.6"], cudf.Decimal64Dtype(scale=6, precision=9), False, ), ( operator.truediv, ["100", "200"], cudf.Decimal64Dtype(scale=2, precision=5), 1, ["0", "0"], cudf.Decimal64Dtype(scale=6, precision=9), True, ), ( operator.truediv, ["1.2", "0.5"], cudf.Decimal64Dtype(scale=1, precision=6), decimal.Decimal(20), ["10", "40"], cudf.Decimal64Dtype(scale=7, precision=10), True, ), ( operator.truediv, ["1.22", "5.24"], cudf.Decimal64Dtype(scale=2, precision=3), decimal.Decimal("8.55"), ["7", "1"], cudf.Decimal64Dtype(scale=6, precision=9), True, ), ( operator.sub, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal(2), ["98", "198"], cudf.Decimal64Dtype(scale=0, precision=6), False, ), ( operator.sub, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal("2.5"), ["97.5", "197.5"], cudf.Decimal64Dtype(scale=1, precision=7), False, ), ( operator.sub, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), 4, ["96", "196"], cudf.Decimal64Dtype(scale=0, precision=6), False, ), ( operator.sub, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal(2), ["-98", "-198"], cudf.Decimal64Dtype(scale=0, precision=6), True, ), ( operator.sub, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), 4, ["-96", "-196"], cudf.Decimal64Dtype(scale=0, precision=6), True, ), ( operator.sub, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal("2.5"), ["-97.5", "-197.5"], cudf.Decimal64Dtype(scale=1, precision=7), True, ), ( operator.sub, ["100", "200"], cudf.Decimal64Dtype(scale=-2, precision=3), decimal.Decimal("2.5"), ["-97.5", "-197.5"], cudf.Decimal64Dtype(scale=1, precision=7), True, ), ], ) def test_binops_decimal_scalar(args): op, lhs, l_dtype, rhs, expect, expect_dtype, reflect = args def decimal_series(input, dtype): return cudf.Series( [x if x is None else decimal.Decimal(x) for x in input], dtype=dtype, ) lhs = decimal_series(lhs, l_dtype) expect = decimal_series(expect, expect_dtype) if reflect: lhs, rhs = rhs, lhs got = op(lhs, rhs) assert expect.dtype == got.dtype utils.assert_eq(expect, got) @pytest.mark.parametrize( "args", [ ( operator.eq, ["100.00", "41", None], cudf.Decimal64Dtype(scale=0, precision=5), 100, cudf.Series([True, False, None], dtype=bool), cudf.Series([True, False, None], dtype=bool), ), ( operator.eq, ["100.123", "41", None], cudf.Decimal64Dtype(scale=3, precision=6), decimal.Decimal("100.123"), cudf.Series([True, False, None], dtype=bool), cudf.Series([True, False, None], dtype=bool), ), ( operator.eq, ["100.123", "41", None], cudf.Decimal64Dtype(scale=3, precision=6), 
cudf.Scalar(decimal.Decimal("100.123")), cudf.Series([True, False, None], dtype=bool), cudf.Series([True, False, None], dtype=bool), ), ( operator.ne, ["100.00", "41", None], cudf.Decimal64Dtype(scale=2, precision=5), 100, cudf.Series([False, True, None], dtype=bool), cudf.Series([False, True, None], dtype=bool), ), ( operator.ne, ["100.123", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), decimal.Decimal("100.123"), cudf.Series([False, True, None], dtype=bool), cudf.Series([False, True, None], dtype=bool), ), ( operator.ne, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), cudf.Scalar(decimal.Decimal("100.123")), cudf.Series([False, True, True, None], dtype=bool), cudf.Series([False, True, True, None], dtype=bool), ), ( operator.gt, ["100.00", "41", "120.21", None], cudf.Decimal64Dtype(scale=2, precision=5), 100, cudf.Series([False, False, True, None], dtype=bool), cudf.Series([False, True, False, None], dtype=bool), ), ( operator.gt, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), decimal.Decimal("100.123"), cudf.Series([False, False, True, None], dtype=bool), cudf.Series([False, True, False, None], dtype=bool), ), ( operator.gt, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), cudf.Scalar(decimal.Decimal("100.123")), cudf.Series([False, False, True, None], dtype=bool), cudf.Series([False, True, False, None], dtype=bool), ), ( operator.ge, ["100.00", "41", "120.21", None], cudf.Decimal64Dtype(scale=2, precision=5), 100, cudf.Series([True, False, True, None], dtype=bool), cudf.Series([True, True, False, None], dtype=bool), ), ( operator.ge, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), decimal.Decimal("100.123"), cudf.Series([True, False, True, None], dtype=bool), cudf.Series([True, True, False, None], dtype=bool), ), ( operator.ge, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), cudf.Scalar(decimal.Decimal("100.123")), cudf.Series([True, False, True, None], dtype=bool), cudf.Series([True, True, False, None], dtype=bool), ), ( operator.lt, ["100.00", "41", "120.21", None], cudf.Decimal64Dtype(scale=2, precision=5), 100, cudf.Series([False, True, False, None], dtype=bool), cudf.Series([False, False, True, None], dtype=bool), ), ( operator.lt, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), decimal.Decimal("100.123"), cudf.Series([False, True, False, None], dtype=bool), cudf.Series([False, False, True, None], dtype=bool), ), ( operator.lt, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), cudf.Scalar(decimal.Decimal("100.123")), cudf.Series([False, True, False, None], dtype=bool), cudf.Series([False, False, True, None], dtype=bool), ), ( operator.le, ["100.00", "41", "120.21", None], cudf.Decimal64Dtype(scale=2, precision=5), 100, cudf.Series([True, True, False, None], dtype=bool), cudf.Series([True, False, True, None], dtype=bool), ), ( operator.le, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), decimal.Decimal("100.123"), cudf.Series([True, True, False, None], dtype=bool), cudf.Series([True, False, True, None], dtype=bool), ), ( operator.le, ["100.123", "41", "120.21", None], cudf.Decimal64Dtype(scale=3, precision=6), cudf.Scalar(decimal.Decimal("100.123")), cudf.Series([True, True, False, None], dtype=bool), cudf.Series([True, False, True, None], dtype=bool), ), ], ) @pytest.mark.parametrize("reflected", [True, False]) def 
test_binops_decimal_scalar_compare(args, reflected): """ Tested compare operations: eq, lt, gt, le, ge Each operation has 3 data setups: pyints, Decimal, and decimal cudf.Scalar For each data setup, there is at least one row that lead to one of the following compare results: {True, False, None}. """ if not reflected: op, ldata, ldtype, rdata, expected, _ = args else: op, ldata, ldtype, rdata, _, expected = args lhs = utils._decimal_series(ldata, ldtype) rhs = rdata if reflected: rhs, lhs = lhs, rhs actual = op(lhs, rhs) utils.assert_eq(expected, actual) @pytest.mark.parametrize( "dtype", [ "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64", "float32", "float64", "str", "datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]", "timedelta64[ns]", "timedelta64[us]", "timedelta64[ms]", "timedelta64[s]", ], ) @pytest.mark.parametrize("null_scalar", [None, cudf.NA, np.datetime64("NaT")]) @pytest.mark.parametrize("cmpop", _cmpops) def test_column_null_scalar_comparison(dtype, null_scalar, cmpop): # This test is meant to validate that comparing # a series of any dtype with a null scalar produces # a new series where all the elements are <NA>. if isinstance(null_scalar, np.datetime64): if cudf.dtype(dtype).kind not in "mM": pytest.skip() null_scalar = null_scalar.astype(dtype) dtype = cudf.dtype(dtype) data = [1, 2, 3, 4, 5] sr = cudf.Series(data, dtype=dtype) result = cmpop(sr, null_scalar) assert result.isnull().all() @pytest.mark.parametrize("fn", ["eq", "ne", "lt", "gt", "le", "ge"]) def test_equality_ops_index_mismatch(fn): a = cudf.Series( [1, 2, 3, None, None, 4], index=["a", "b", "c", "d", "e", "f"] ) b = cudf.Series( [-5, 4, 3, 2, 1, 0, 19, 11], index=["aa", "b", "c", "d", "e", "f", "y", "z"], ) pa = a.to_pandas(nullable=True) pb = b.to_pandas(nullable=True) expected = getattr(pa, fn)(pb) actual = getattr(a, fn)(b).to_pandas(nullable=True) utils.assert_eq(expected, actual) def generate_test_null_equals_columnops_data(): # Generate tuples of: # (left_data, right_data, compare_bool # where compare_bool is the correct answer to # if the columns should compare as null equals def set_null_cases(column_l, column_r, case): if case == "neither": return column_l, column_r elif case == "left": column_l[1] = None elif case == "right": column_r[1] = None elif case == "both": column_l[1] = None column_r[1] = None else: raise ValueError("Unknown null case") return column_l, column_r null_cases = ["neither", "left", "right", "both"] data = [1, 2, 3] results = [] # TODO: Numeric types can be cross compared as null equal for dtype in ( list(NUMERIC_TYPES) + list(DATETIME_TYPES) + list(TIMEDELTA_TYPES) + list(STRING_TYPES) + ["category"] ): for case in null_cases: left = cudf.Series(data, dtype=dtype) right = cudf.Series(data, dtype=dtype) if case in {"left", "right"}: answer = False else: answer = True left, right = set_null_cases(left, right, case) results.append((left._column, right._column, answer, case)) return results @pytest.mark.parametrize( "lcol,rcol,ans,case", generate_test_null_equals_columnops_data() ) def test_null_equals_columnops(lcol, rcol, ans, case): assert lcol.equals(rcol).all() == ans def test_add_series_to_dataframe(): """Verify that missing columns result in NaNs, not NULLs.""" assert cp.all( cp.isnan( ( cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + cudf.Series([1, 2, 3], index=["a", "b", "c"]) )["c"] ) ) @pytest.mark.parametrize("obj_class", [cudf.Series, cudf.Index]) @pytest.mark.parametrize("binop", _binops) def 
test_binops_cupy_array(obj_class, binop): # Skip 0 to not deal with NaNs from division. data = range(1, 100) lhs = obj_class(data) rhs = cp.array(data) assert (binop(lhs, rhs) == binop(lhs, lhs)).all() @pytest.mark.parametrize("binop", _binops + _binops_compare) @pytest.mark.parametrize("data", [None, [-9, 7], [5, -2], [12, 18]]) @pytest.mark.parametrize("scalar", [1, 3, 12, np.nan]) def test_empty_column(binop, data, scalar): gdf = cudf.DataFrame(columns=["a", "b"]) if data is not None: gdf["a"] = data pdf = gdf.to_pandas() got = binop(gdf, scalar) expected = binop(pdf, scalar) utils.assert_eq(expected, got) @pytest.mark.parametrize( "df", [ cudf.DataFrame( [[1, 2, 3, 4], [5, 6, 7, 8], [10, 11, 12, 13], [14, 15, 16, 17]] ), pytest.param( cudf.DataFrame([[1, None, None, 4], [5, 6, 7, None]]), marks=pytest_xfail( reason="Cannot access Frame.values if frame contains nulls" ), ), cudf.DataFrame( [ [1.2, 2.3, 3.4, 4.5], [5.6, 6.7, 7.8, 8.9], [7.43, 4.2, 23.2, 23.2], [9.1, 2.4, 4.5, 65.34], ] ), cudf.Series([14, 15, 16, 17]), cudf.Series([14.15, 15.16, 16.17, 17.18]), ], ) @pytest.mark.parametrize( "other", [ cudf.DataFrame([[9, 10], [11, 12], [13, 14], [15, 16]]), cudf.DataFrame( [[9.4, 10.5], [11.6, 12.7], [13.8, 14.9], [15.1, 16.2]] ), cudf.Series([5, 6, 7, 8]), cudf.Series([5.6, 6.7, 7.8, 8.9]), np.array([5, 6, 7, 8]), [25.5, 26.6, 27.7, 28.8], ], ) def test_binops_dot(df, other): pdf = df.to_pandas() host_other = other.to_pandas() if hasattr(other, "to_pandas") else other expected = pdf @ host_other got = df @ other utils.assert_eq(expected, got) def test_binop_dot_preserve_index(): ser = cudf.Series(range(2), index=["A", "B"]) df = cudf.DataFrame(np.eye(2), columns=["A", "B"], index=["A", "B"]) result = ser @ df expected = ser.to_pandas() @ df.to_pandas() utils.assert_eq(result, expected) def test_binop_series_with_repeated_index(): # GH: #11094 psr1 = pd.Series([1, 1], index=["a", "a"]) psr2 = pd.Series([1], index=["a"]) gsr1 = cudf.from_pandas(psr1) gsr2 = cudf.from_pandas(psr2) expected = psr1 - psr2 got = gsr1 - gsr2 utils.assert_eq(expected, got) def test_binop_integer_power_series_series(): # GH: #10178 gs_base = cudf.Series([3, -3, 8, -8]) gs_exponent = cudf.Series([1, 1, 7, 7]) ps_base = gs_base.to_pandas() ps_exponent = gs_exponent.to_pandas() expected = ps_base**ps_exponent got = gs_base**gs_exponent utils.assert_eq(expected, got) def test_binop_integer_power_series_scalar(): # GH: #10178 gs_base = cudf.Series([3, -3, 8, -8]) exponent = cudf.Scalar(1) ps_base = gs_base.to_pandas() expected = ps_base**exponent.value got = gs_base**exponent utils.assert_eq(expected, got) def test_binop_integer_power_series_int(): # GH: #10178 gs_base = cudf.Series([3, -3, 8, -8]) exponent = 1 ps_base = gs_base.to_pandas() expected = ps_base**exponent got = gs_base**exponent utils.assert_eq(expected, got) def test_binop_integer_power_scalar_series(): # GH: #10178 base = cudf.Scalar(3) gs_exponent = cudf.Series([1, 1, 7, 7]) ps_exponent = gs_exponent.to_pandas() expected = base.value**ps_exponent got = base**gs_exponent utils.assert_eq(expected, got) def test_binop_integer_power_scalar_scalar(): # GH: #10178 base = cudf.Scalar(3) exponent = cudf.Scalar(1) expected = base.value**exponent.value got = base**exponent utils.assert_eq(expected, got) def test_binop_integer_power_scalar_int(): # GH: #10178 base = cudf.Scalar(3) exponent = 1 expected = base.value**exponent got = base**exponent utils.assert_eq(expected, got) def test_binop_integer_power_int_series(): # GH: #10178 base = 3 gs_exponent = 
cudf.Series([1, 1, 7, 7]) ps_exponent = gs_exponent.to_pandas() expected = base**ps_exponent got = base**gs_exponent utils.assert_eq(expected, got) def test_binop_integer_power_int_scalar(): # GH: #10178 base = 3 exponent = cudf.Scalar(1) expected = base**exponent.value got = base**exponent utils.assert_eq(expected, got) def test_numpy_int_scalar_binop(): assert (np.float32(1.0) - cudf.Scalar(1)) == 0.0 @pytest.mark.parametrize("op", _binops) def test_binop_index_series(op): gi = cudf.Index([10, 11, 12]) gs = cudf.Series([1, 2, 3]) actual = op(gi, gs) expected = op(gi.to_pandas(), gs.to_pandas()) utils.assert_eq(expected, actual) @pytest.mark.parametrize("name1", utils.SERIES_OR_INDEX_NAMES) @pytest.mark.parametrize("name2", utils.SERIES_OR_INDEX_NAMES) def test_binop_index_dt_td_series_with_names(name1, name2): gi = cudf.Index([1, 2, 3], dtype="datetime64[ns]", name=name1) gs = cudf.Series([10, 11, 12], dtype="timedelta64[ns]", name=name2) with warnings.catch_warnings(): # Numpy raises a deprecation warning: # "elementwise comparison failed; this will raise an error " warnings.simplefilter("ignore", (DeprecationWarning,)) expected = gi.to_pandas() + gs.to_pandas() actual = gi + gs utils.assert_eq(expected, actual) @pytest.mark.parametrize("data1", [[1, 2, 3], [10, 11, None]]) @pytest.mark.parametrize("data2", [[1, 2, 3], [10, 11, None]]) def test_binop_eq_ne_index_series(data1, data2): gi = cudf.Index(data1, dtype="datetime64[ns]", name=np.nan) gs = cudf.Series(data2, dtype="timedelta64[ns]", name="abc") actual = gi == gs expected = gi.to_pandas() == gs.to_pandas() utils.assert_eq(expected, actual) actual = gi != gs expected = gi.to_pandas() != gs.to_pandas() utils.assert_eq(expected, actual)
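
# --- Editor's illustrative sketch (not part of the original test file) ---
# A minimal, hedged example of the decimal-vs-integer comparison semantics
# exercised above, assuming a working cudf installation: operands are
# compared numerically and nulls propagate as <NA>. It mirrors the
# `_decimal_series` construction pattern used by these tests.
import decimal

import cudf

lhs = cudf.Series(
    [None if v is None else decimal.Decimal(v) for v in ("100", "42", None)],
    dtype=cudf.Decimal64Dtype(scale=0, precision=3),
)
rhs = cudf.Series([100, 41, 12], dtype="int64")
print(lhs == rhs)  # expected per the tests above: [True, False, <NA>]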
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_factorize.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import cupy as cp import numpy as np import pandas as pd import pytest import cudf from cudf import DataFrame, Index from cudf.testing._utils import assert_eq @pytest.mark.parametrize("ncats,nelem", [(2, 2), (2, 10), (10, 100)]) def test_factorize_series_obj(ncats, nelem): df = DataFrame() np.random.seed(0) # initialize data frame df["cats"] = arr = np.random.randint(2, size=10, dtype=np.int32) uvals, labels = df["cats"].factorize() np.testing.assert_array_equal(labels.to_numpy(), sorted(set(arr))) assert isinstance(uvals, cp.ndarray) assert isinstance(labels, Index) encoder = {labels[idx]: idx for idx in range(len(labels))} handcoded = [encoder[v] for v in arr] np.testing.assert_array_equal(uvals.get(), handcoded) @pytest.mark.parametrize("ncats,nelem", [(2, 2), (2, 10), (10, 100)]) def test_factorize_index_obj(ncats, nelem): df = DataFrame() np.random.seed(0) # initialize data frame df["cats"] = arr = np.random.randint(2, size=10, dtype=np.int32) df = df.set_index("cats") uvals, labels = df.index.factorize() np.testing.assert_array_equal(labels.values.get(), sorted(set(arr))) assert isinstance(uvals, cp.ndarray) assert isinstance(labels, Index) encoder = {labels[idx]: idx for idx in range(len(labels))} handcoded = [encoder[v] for v in arr] np.testing.assert_array_equal(uvals.get(), handcoded) def test_factorize_series_index(): df = DataFrame() df["col1"] = ["C", "H", "C", "W", "W", "W", "W", "W", "C", "W"] df["col2"] = [ 2992443.0, 2992447.0, 2992466.0, 2992440.0, 2992441.0, 2992442.0, 2992444.0, 2992445.0, 2992446.0, 2992448.0, ] assert_eq(df.col1.factorize()[0].get(), df.to_pandas().col1.factorize()[0]) assert_eq( df.col1.factorize()[1].to_pandas().values, df.to_pandas().col1.factorize()[1].values, ) df = df.set_index("col2") assert_eq(df.col1.factorize()[0].get(), df.to_pandas().col1.factorize()[0]) assert_eq( df.col1.factorize()[1].to_pandas().values, df.to_pandas().col1.factorize()[1].values, ) def test_cudf_factorize_series(): data = [1, 2, 3, 4, 5] psr = pd.Series(data) gsr = cudf.Series(data) expect = pd.factorize(psr) got = cudf.factorize(gsr) assert len(expect) == len(got) np.testing.assert_array_equal(expect[0], got[0].get()) np.testing.assert_array_equal(expect[1], got[1].values.get()) def test_cudf_factorize_index(): data = [1, 2, 3, 4, 5] pi = pd.Index(data) gi = cudf.Index(data) expect = pd.factorize(pi) got = cudf.factorize(gi) assert len(expect) == len(got) np.testing.assert_array_equal(expect[0], got[0].get()) np.testing.assert_array_equal(expect[1], got[1].values.get()) def test_cudf_factorize_array(): data = [1, 2, 3, 4, 5] parr = np.array(data) garr = cp.array(data) expect = pd.factorize(parr) got = cudf.factorize(garr) assert len(expect) == len(got) np.testing.assert_array_equal(expect[0], got[0].get()) np.testing.assert_array_equal(expect[1], got[1].get()) @pytest.mark.parametrize("pandas_compatibility", [True, False]) def test_factorize_code_pandas_compatibility(pandas_compatibility): psr = pd.Series([1, 2, 3, 4, 5]) gsr = cudf.from_pandas(psr) expect = pd.factorize(psr) with cudf.option_context("mode.pandas_compatible", pandas_compatibility): got = cudf.factorize(gsr) assert_eq(got[0], expect[0]) assert_eq(got[1], expect[1]) if pandas_compatibility: assert got[0].dtype == expect[0].dtype else: assert got[0].dtype == cudf.dtype("int8") def test_factorize_result_classes(): data = [1, 2, 3] labels, cats = cudf.factorize(cudf.Series(data)) assert isinstance(labels, cp.ndarray) assert isinstance(cats, cudf.BaseIndex) 
labels, cats = cudf.factorize(cudf.Index(data)) assert isinstance(labels, cp.ndarray) assert isinstance(cats, cudf.BaseIndex) labels, cats = cudf.factorize(cp.array(data)) assert isinstance(labels, cp.ndarray) assert isinstance(cats, cp.ndarray) @pytest.mark.parametrize( "data", [ ["abc", "def", "abc", "a", "def", None], [10, 20, 100, -10, 0, 1, None, 10, 100], ], ) def test_category_dtype_factorize(data): gs = cudf.Series(data, dtype="category") ps = gs.to_pandas() actual_codes, actual_uniques = gs.factorize() expected_codes, expected_uniques = ps.factorize() assert_eq(actual_codes, expected_codes) assert_eq(actual_uniques, expected_uniques)
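
# --- Editor's illustrative sketch (not part of the original test file) ---
# Hedged example of the factorize behavior tested above, assuming a working
# cudf installation: codes come back as a cupy ndarray and the uniques as a
# cudf Index, and the code dtype defaults to int8 unless
# mode.pandas_compatible is enabled.
import cudf

codes, uniques = cudf.factorize(cudf.Series(["b", "a", "b"]))
print(type(codes).__name__, codes.get())  # ndarray [0 1 0]
print(uniques)                            # Index(['b', 'a'], dtype='object')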
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_array_ufunc.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import operator import warnings from contextlib import contextmanager from functools import reduce import cupy as cp import numpy as np import pytest import cudf from cudf.core._compat import PANDAS_GE_150 from cudf.testing._utils import assert_eq, set_random_null_mask_inplace _UFUNCS = [ obj for obj in (getattr(np, name) for name in dir(np)) if isinstance(obj, np.ufunc) ] @contextmanager def _hide_ufunc_warnings(ufunc): # pandas raises warnings for some inputs to the following ufuncs: name = ufunc.__name__ if name in { "arccos", "arccosh", "arcsin", "arctanh", "fmod", "log", "log10", "log2", "reciprocal", }: with warnings.catch_warnings(): warnings.filterwarnings( "ignore", f"invalid value encountered in {name}", category=RuntimeWarning, ) warnings.filterwarnings( "ignore", f"divide by zero encountered in {name}", category=RuntimeWarning, ) yield else: yield @pytest.mark.parametrize("ufunc", _UFUNCS) def test_ufunc_index(request, ufunc): # Note: This test assumes that all ufuncs are unary or binary. fname = ufunc.__name__ request.applymarker( pytest.mark.xfail( condition=( fname in {"bitwise_and", "bitwise_or", "bitwise_xor"} and not PANDAS_GE_150 ), reason="https://github.com/pandas-dev/pandas/issues/46769", ) ) request.applymarker( pytest.mark.xfail( condition=not hasattr(cp, fname), reason=f"cupy has no support for '{fname}'", ) ) N = 100 # Avoid zeros in either array to skip division by 0 errors. Also limit the # scale to avoid issues with overflow, etc. We use ints because some # operations (like bitwise ops) are not defined for floats. pandas_args = args = [ cudf.Index( cp.random.randint(low=1, high=10, size=N), ) for _ in range(ufunc.nin) ] got = ufunc(*args) with _hide_ufunc_warnings(ufunc): expect = ufunc(*(arg.to_pandas() for arg in pandas_args)) if ufunc.nout > 1: for g, e in zip(got, expect): assert_eq(g, e, check_exact=False) else: assert_eq(got, expect, check_exact=False) @pytest.mark.parametrize( "ufunc", [np.add, np.greater, np.greater_equal, np.logical_and] ) @pytest.mark.parametrize("reflect", [True, False]) def test_binary_ufunc_index_array(ufunc, reflect): N = 100 # Avoid zeros in either array to skip division by 0 errors. Also limit the # scale to avoid issues with overflow, etc. We use ints because some # operations (like bitwise ops) are not defined for floats. args = [cudf.Index(cp.random.rand(N)) for _ in range(ufunc.nin)] arg1 = args[1].to_cupy() if reflect: got = ufunc(arg1, args[0]) expect = ufunc(args[1].to_numpy(), args[0].to_pandas()) else: got = ufunc(args[0], arg1) expect = ufunc(args[0].to_pandas(), args[1].to_numpy()) if ufunc.nout > 1: for g, e in zip(got, expect): if reflect: assert (cp.asnumpy(g) == e).all() else: assert_eq(g, e, check_exact=False) else: if reflect: assert (cp.asnumpy(got) == expect).all() else: assert_eq(got, expect, check_exact=False) @pytest.mark.parametrize("ufunc", _UFUNCS) @pytest.mark.parametrize("has_nulls", [True, False]) @pytest.mark.parametrize("indexed", [True, False]) def test_ufunc_series(request, ufunc, has_nulls, indexed): # Note: This test assumes that all ufuncs are unary or binary. 
fname = ufunc.__name__ request.applymarker( pytest.mark.xfail( condition=( indexed and fname in { "greater", "greater_equal", "less", "less_equal", "not_equal", "equal", } ), reason="Comparison operators do not support misaligned indexes.", ) ) request.applymarker( pytest.mark.xfail( condition=ufunc == np.matmul and has_nulls, reason="Can't call cupy on column with nulls", ) ) # If we don't have explicit dispatch and cupy doesn't support the operator, # we expect a failure request.applymarker( pytest.mark.xfail( condition=not hasattr(cp, fname), reason=f"cupy has no support for '{fname}'", ) ) N = 100 # Avoid zeros in either array to skip division by 0 errors. Also limit the # scale to avoid issues with overflow, etc. We use ints because some # operations (like bitwise ops) are not defined for floats. pandas_args = args = [ cudf.Series( cp.random.randint(low=1, high=10, size=N), index=cp.random.choice(range(N), N, False) if indexed else None, ) for _ in range(ufunc.nin) ] if has_nulls: # Converting nullable integer cudf.Series to pandas will produce a # float pd.Series, so instead we replace nulls with an arbitrary # integer value, precompute the mask, and then reapply it afterwards. for arg in args: set_random_null_mask_inplace(arg) pandas_args = [arg.fillna(0) for arg in args] # Note: Different indexes must be aligned before the mask is computed. # This requires using an internal function (_align_indices), and that # is unlikely to change for the foreseeable future. aligned = ( cudf.core.series._align_indices(args, allow_non_unique=True) if indexed and ufunc.nin == 2 else args ) mask = reduce(operator.or_, (a.isna() for a in aligned)).to_pandas() got = ufunc(*args) with _hide_ufunc_warnings(ufunc): expect = ufunc(*(arg.to_pandas() for arg in pandas_args)) if ufunc.nout > 1: for g, e in zip(got, expect): if has_nulls: e[mask] = np.nan assert_eq(g, e, check_exact=False) else: if has_nulls: expect[mask] = np.nan assert_eq(got, expect, check_exact=False) @pytest.mark.parametrize( "ufunc", [np.add, np.greater, np.greater_equal, np.logical_and] ) @pytest.mark.parametrize("has_nulls", [True, False]) @pytest.mark.parametrize("indexed", [True, False]) @pytest.mark.parametrize("reflect", [True, False]) def test_binary_ufunc_series_array( request, ufunc, has_nulls, indexed, reflect ): fname = ufunc.__name__ request.applymarker( pytest.mark.xfail( condition=reflect and has_nulls, reason=( "When cupy is the left operand there is no way for us to " "avoid calling its binary operators, which cannot handle " "cudf objects that contain nulls." ), ) ) # The way cudf casts nans in arrays to nulls during binops with cudf # objects is currently incompatible with pandas. request.applymarker( pytest.mark.xfail( condition=( fname in {"greater", "greater_equal", "logical_and"} and has_nulls ), reason=( "cudf and pandas incompatible casting nans " "to nulls in binops" ), ) ) N = 100 # Avoid zeros in either array to skip division by 0 errors. Also limit the # scale to avoid issues with overflow, etc. We use ints because some # operations (like bitwise ops) are not defined for floats. args = [ cudf.Series( cp.random.rand(N), index=cp.random.choice(range(N), N, False) if indexed else None, ) for _ in range(ufunc.nin) ] if has_nulls: # Converting nullable integer cudf.Series to pandas will produce a # float pd.Series, so instead we replace nulls with an arbitrary # integer value, precompute the mask, and then reapply it afterwards. 
for arg in args: set_random_null_mask_inplace(arg) # Cupy doesn't support nulls, so we fill with nans before converting. args[1] = args[1].fillna(cp.nan) mask = args[0].isna().to_pandas() arg1 = args[1].to_cupy() if reflect: got = ufunc(arg1, args[0]) expect = ufunc(args[1].to_numpy(), args[0].to_pandas()) else: got = ufunc(args[0], arg1) expect = ufunc(args[0].to_pandas(), args[1].to_numpy()) if ufunc.nout > 1: for g, e in zip(got, expect): if has_nulls: e[mask] = np.nan if reflect: assert (cp.asnumpy(g) == e).all() else: assert_eq(g, e, check_exact=False) else: if has_nulls: expect[mask] = np.nan if reflect: assert (cp.asnumpy(got) == expect).all() else: assert_eq(got, expect, check_exact=False) @pytest.mark.parametrize( "func", [np.add], ) def test_ufunc_cudf_series_error_with_out_kwarg(func): cudf_s1 = cudf.Series(data=[-1, 2, 3, 0]) cudf_s2 = cudf.Series(data=[-1, 2, 3, 0]) cudf_s3 = cudf.Series(data=[0, 0, 0, 0]) # this throws a value-error because of presence of out kwarg with pytest.raises(TypeError): func(x1=cudf_s1, x2=cudf_s2, out=cudf_s3) # Skip matmul since it requires aligned shapes. @pytest.mark.parametrize("ufunc", (uf for uf in _UFUNCS if uf != np.matmul)) @pytest.mark.parametrize("has_nulls", [True, False]) @pytest.mark.parametrize("indexed", [True, False]) def test_ufunc_dataframe(request, ufunc, has_nulls, indexed): # Note: This test assumes that all ufuncs are unary or binary. fname = ufunc.__name__ request.applymarker( pytest.mark.xfail( condition=( indexed and fname in { "greater", "greater_equal", "less", "less_equal", "not_equal", "equal", } ), reason="Comparison operators do not support misaligned indexes.", ) ) # If we don't have explicit dispatch and cupy doesn't support the operator, # we expect a failure request.applymarker( pytest.mark.xfail( condition=not hasattr(cp, fname), reason=f"cupy has no support for '{fname}'", ) ) request.applymarker( pytest.mark.xfail( condition=( indexed and fname in { "add", "arctan2", "bitwise_and", "bitwise_or", "bitwise_xor", "copysign", "divide", "divmod", "float_power", "floor_divide", "fmax", "fmin", "fmod", "heaviside", "gcd", "hypot", "lcm", "ldexp", "left_shift", "logaddexp", "logaddexp2", "logical_and", "logical_or", "logical_xor", "maximum", "minimum", "multiply", "nextafter", "power", "remainder", "right_shift", "subtract", } ), reason=( "pandas does not currently support misaligned " "indexes in DataFrames" ), ) ) N = 100 # Avoid zeros in either array to skip division by 0 errors. Also limit the # scale to avoid issues with overflow, etc. We use ints because some # operations (like bitwise ops) are not defined for floats. # TODO: Add tests of mismatched columns etc. pandas_args = args = [ cudf.DataFrame( {"foo": cp.random.randint(low=1, high=10, size=N)}, index=cp.random.choice(range(N), N, False) if indexed else None, ) for _ in range(ufunc.nin) ] if has_nulls: # Converting nullable integer cudf.Series to pandas will produce a # float pd.Series, so instead we replace nulls with an arbitrary # integer value, precompute the mask, and then reapply it afterwards. for arg in args: set_random_null_mask_inplace(arg["foo"]) pandas_args = [arg.copy() for arg in args] for arg in pandas_args: arg["foo"] = arg["foo"].fillna(0) # Note: Different indexes must be aligned before the mask is computed. # This requires using an internal function (_align_indices), and that # is unlikely to change for the foreseeable future. 
aligned = ( cudf.core.dataframe._align_indices(*args) if indexed and ufunc.nin == 2 else args ) mask = reduce( operator.or_, (a["foo"].isna() for a in aligned) ).to_pandas() got = ufunc(*args) with _hide_ufunc_warnings(ufunc): expect = ufunc(*(arg.to_pandas() for arg in pandas_args)) if ufunc.nout > 1: for g, e in zip(got, expect): if has_nulls: e[mask] = np.nan assert_eq(g, e, check_exact=False) else: if has_nulls: expect[mask] = np.nan assert_eq(got, expect, check_exact=False)
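
# --- Editor's illustrative sketch (not part of the original test file) ---
# Hedged example of the dispatch behavior tested above, assuming a working
# cudf installation: calling a numpy ufunc on cudf objects goes through
# __array_ufunc__ and returns cudf objects rather than numpy arrays.
import numpy as np

import cudf

s1 = cudf.Series([1, 2, 3])
s2 = cudf.Series([10, 20, 30])
out = np.add(s1, s2)
print(type(out).__name__)        # Series (a cudf.Series, not np.ndarray)
print(out.to_pandas().tolist())  # [11, 22, 33]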
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_mvc.py
# Copyright (c) 2023, NVIDIA CORPORATION.
import subprocess
import sys

import pytest

IS_CUDA_11 = False
IS_CUDA_12 = False
try:
    from ptxcompiler.patch import safe_get_versions
except ModuleNotFoundError:
    from cudf.utils._ptxcompiler import safe_get_versions

# do not test cuda 12 if pynvjitlink isn't present
HAVE_PYNVJITLINK = False
try:
    import pynvjitlink  # noqa: F401

    HAVE_PYNVJITLINK = True
except ModuleNotFoundError:
    pass

versions = safe_get_versions()
driver_version, runtime_version = versions

if (11, 0) <= driver_version < (12, 0):
    IS_CUDA_11 = True
if (12, 0) <= driver_version < (13, 0):
    IS_CUDA_12 = True


TEST_BODY = """
@numba.cuda.jit
def test_kernel(x):
    id = numba.cuda.grid(1)
    if id < len(x):
        x[id] += 1

s = cudf.Series([1, 2, 3])
with _CUDFNumbaConfig():
    test_kernel.forall(len(s))(s)
"""

CUDA_11_TEST = (
    """
import numba.cuda
import cudf
from cudf.utils._numba import _CUDFNumbaConfig, patch_numba_linker_cuda_11

patch_numba_linker_cuda_11()
"""
    + TEST_BODY
)


CUDA_12_TEST = (
    """
import numba.cuda
import cudf
from cudf.utils._numba import _CUDFNumbaConfig
from pynvjitlink.patch import (
    patch_numba_linker as patch_numba_linker_pynvjitlink,
)

patch_numba_linker_pynvjitlink()
"""
    + TEST_BODY
)


@pytest.mark.parametrize(
    "test",
    [
        pytest.param(
            CUDA_11_TEST,
            marks=pytest.mark.skipif(
                not IS_CUDA_11,
                reason="Minor Version Compatibility test for CUDA 11",
            ),
        ),
        pytest.param(
            CUDA_12_TEST,
            marks=pytest.mark.skipif(
                not IS_CUDA_12 or not HAVE_PYNVJITLINK,
                reason="Minor Version Compatibility test for CUDA 12",
            ),
        ),
    ],
)
def test_numba_mvc(test):
    cp = subprocess.run(
        [sys.executable, "-c", test],
        capture_output=True,
        cwd="/",
    )

    assert cp.returncode == 0
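
# --- Editor's illustrative sketch (not part of the original test file) ---
# Hedged example of the version gate used above, assuming the same import
# fallback resolves in your environment: safe_get_versions reports the
# (driver, runtime) CUDA versions as (major, minor) tuples, which the module
# uses to pick the CUDA 11 or CUDA 12 minor-version-compatibility path.
try:
    from ptxcompiler.patch import safe_get_versions
except ModuleNotFoundError:
    from cudf.utils._ptxcompiler import safe_get_versions

driver, runtime = safe_get_versions()
print(driver, runtime)  # e.g. (12, 2) (12, 0)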
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_compile_udf.py
# Copyright (c) 2021, NVIDIA CORPORATION.

from numba import types

from cudf.utils import cudautils


def setup_function():
    cudautils._udf_code_cache.clear()


def assert_cache_size(size):
    assert cudautils._udf_code_cache.currsize == size


def test_first_compile_sets_cache_entry():
    # The first compilation should put an entry in the cache
    cudautils.compile_udf(lambda x: x + 1, (types.float32,))
    assert_cache_size(1)


def test_code_cache_same_code_different_function_hit():
    # Compilation of a distinct function with the same code and signature
    # should reuse the cached entry
    cudautils.compile_udf(lambda x: x + 1, (types.float32,))
    assert_cache_size(1)

    cudautils.compile_udf(lambda x: x + 1, (types.float32,))
    assert_cache_size(1)


def test_code_cache_different_types_miss():
    # Compilation of a distinct function with the same code but different
    # types should create an additional cache entry
    cudautils.compile_udf(lambda x: x + 1, (types.float32,))
    assert_cache_size(1)

    cudautils.compile_udf(lambda x: x + 1, (types.float64,))
    assert_cache_size(2)


def test_code_cache_different_cvars_miss():
    # Compilation of a distinct function with the same types and code as an
    # existing entry but different closure variables should create an
    # additional cache entry
    def gen_closure(y):
        return lambda x: x + y

    cudautils.compile_udf(gen_closure(1), (types.float32,))
    assert_cache_size(1)

    cudautils.compile_udf(gen_closure(2), (types.float32,))
    assert_cache_size(2)


def test_lambda_in_loop_code_cached():
    # Compiling a UDF defined in a loop should result in the code cache being
    # reused for each loop iteration after the first. We check for this by
    # ensuring that there is only one entry in the code cache after the loop.
    for i in range(3):
        cudautils.compile_udf(lambda x: x + 1, (types.float32,))
    assert_cache_size(1)
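
# --- Editor's illustrative sketch (not part of the original test file) ---
# Hedged example of the caching behavior tested above, assuming a
# CUDA-capable environment: two lambdas with identical code and argument
# types share a single code-cache entry.
from numba import types

from cudf.utils import cudautils

cudautils._udf_code_cache.clear()
cudautils.compile_udf(lambda x: x * 2, (types.float32,))
cudautils.compile_udf(lambda x: x * 2, (types.float32,))
print(cudautils._udf_code_cache.currsize)  # 1, per the tests above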
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_rank.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. from itertools import chain, combinations_with_replacement, product import numpy as np import pandas as pd import pytest from cudf import DataFrame from cudf.testing._utils import assert_eq, assert_exceptions_equal @pytest.fixture def pdf(): return pd.DataFrame( { "col1": np.array([5, 4, 3, 5, 8, 5, 2, 1, 6, 6]), "col2": np.array( [5, 4, np.nan, 5, 8, 5, np.inf, np.nan, 6, -np.inf] ), }, index=np.array([5, 4, 3, 2, 1, 6, 7, 8, 9, 10]), ) @pytest.mark.parametrize("dtype", ["O", "f8", "i4"]) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"]) @pytest.mark.parametrize("na_option", ["keep", "top", "bottom"]) @pytest.mark.parametrize("pct", [True, False]) @pytest.mark.parametrize("numeric_only", [True, False]) def test_rank_all_arguments( pdf, dtype, ascending, method, na_option, pct, numeric_only ): if method == "first" and dtype == "O": # not supported by pandas return pdf = pdf.copy(deep=True) # for parallel pytest if numeric_only: pdf["str"] = np.array( ["a", "b", "c", "d", "e", "1", "2", "3", "4", "5"] ) gdf = DataFrame.from_pandas(pdf) kwargs = { "method": method, "na_option": na_option, "ascending": ascending, "pct": pct, "numeric_only": numeric_only, } # Series assert_eq(gdf["col1"].rank(**kwargs), pdf["col1"].rank(**kwargs)) assert_eq(gdf["col2"].rank(**kwargs), pdf["col2"].rank(**kwargs)) if numeric_only: with pytest.warns(FutureWarning): expect = pdf["str"].rank(**kwargs) got = gdf["str"].rank(**kwargs) assert expect.empty == got.empty expected = pdf.select_dtypes(include=np.number) else: expected = pdf.copy(deep=True) actual = gdf.rank(**kwargs) expected = pdf.rank(**kwargs) assert_eq(expected, actual) def test_rank_error_arguments(pdf): gdf = DataFrame.from_pandas(pdf) assert_exceptions_equal( lfunc=pdf["col1"].rank, rfunc=gdf["col1"].rank, lfunc_args_and_kwargs=( [], { "method": "randomname", "na_option": "keep", "ascending": True, "pct": True, }, ), rfunc_args_and_kwargs=( [], { "method": "randomname", "na_option": "keep", "ascending": True, "pct": True, }, ), ) assert_exceptions_equal( lfunc=pdf["col1"].rank, rfunc=gdf["col1"].rank, lfunc_args_and_kwargs=( [], { "method": "first", "na_option": "randomname", "ascending": True, "pct": True, }, ), rfunc_args_and_kwargs=( [], { "method": "first", "na_option": "randomname", "ascending": True, "pct": True, }, ), ) sort_group_args = [ np.full((3,), np.nan), 100 * np.random.random(10), np.full((3,), np.inf), np.full((3,), -np.inf), ] sort_dtype_args = [np.int32, np.int64, np.float32, np.float64] @pytest.mark.filterwarnings("ignore:invalid value encountered in cast") @pytest.mark.parametrize( "elem,dtype", list( product( combinations_with_replacement(sort_group_args, 4), sort_dtype_args, ) ), ) def test_series_rank_combinations(elem, dtype): np.random.seed(0) aa = np.fromiter(chain.from_iterable(elem), np.float64).astype(dtype) gdf = DataFrame() df = pd.DataFrame() gdf["a"] = aa df["a"] = aa ranked_gs = gdf["a"].rank(method="first") ranked_ps = df["a"].rank(method="first") # Check assert_eq(ranked_ps, ranked_gs)
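
# --- Editor's illustrative sketch (not part of the original test file) ---
# Hedged example of the rank semantics tested above, assuming a working
# cudf installation: `method` controls how ties are assigned and
# `na_option` controls where nulls land.
import cudf

s = cudf.Series([10.0, None, 10.0, 30.0])
print(s.rank(method="min", na_option="keep"))      # ties share the low rank; null stays null
print(s.rank(method="dense", na_option="bottom"))  # nulls are ranked last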
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_reductions.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. from decimal import Decimal from itertools import product import numpy as np import pandas as pd import pytest import cudf from cudf import Series from cudf.core.dtypes import Decimal32Dtype, Decimal64Dtype, Decimal128Dtype from cudf.testing import _utils as utils from cudf.testing._utils import ( NUMERIC_TYPES, assert_eq, expect_warning_if, gen_rand, ) params_dtype = NUMERIC_TYPES params_sizes = [1, 2, 3, 127, 128, 129, 200, 10000] params = list(product(params_dtype, params_sizes)) @pytest.mark.parametrize("dtype,nelem", params) def test_sum(dtype, nelem): dtype = cudf.dtype(dtype).type data = gen_rand(dtype, nelem) sr = Series(data) got = sr.sum() expect = data.sum() significant = 4 if dtype == np.float32 else 6 np.testing.assert_approx_equal(expect, got, significant=significant) def test_sum_string(): s = Series(["Hello", "there", "World"]) got = s.sum() expected = "HellothereWorld" assert got == expected s = Series(["Hello", None, "World"]) got = s.sum() expected = "HelloWorld" assert got == expected @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(6, 3), Decimal64Dtype(10, 6), Decimal64Dtype(16, 7), Decimal32Dtype(6, 3), Decimal128Dtype(20, 7), ], ) @pytest.mark.parametrize("nelem", params_sizes) def test_sum_decimal(dtype, nelem): np.random.seed(0) data = [str(x) for x in gen_rand("int64", nelem) / 100] expected = pd.Series([Decimal(x) for x in data]).sum() got = cudf.Series(data).astype(dtype).sum() assert_eq(expected, got) @pytest.mark.parametrize("dtype,nelem", params) def test_product(dtype, nelem): np.random.seed(0) dtype = cudf.dtype(dtype).type if cudf.dtype(dtype).kind in {"u", "i"}: data = np.ones(nelem, dtype=dtype) # Set at most 30 items to [0..2) to keep the value within 2^32 for _ in range(30): data[np.random.randint(low=0, high=nelem, size=1)] = ( np.random.uniform() * 2 ) else: data = gen_rand(dtype, nelem) sr = Series(data) got = sr.product() expect = pd.Series(data).product() significant = 4 if dtype == np.float32 else 6 np.testing.assert_approx_equal(expect, got, significant=significant) @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(6, 2), Decimal64Dtype(8, 4), Decimal64Dtype(10, 5), Decimal32Dtype(6, 2), Decimal128Dtype(20, 5), ], ) def test_product_decimal(dtype): np.random.seed(0) data = [str(x) for x in gen_rand("int8", 3) / 10] expected = pd.Series([Decimal(x) for x in data]).product() got = cudf.Series(data).astype(dtype).product() assert_eq(expected, got) accuracy_for_dtype = {np.float64: 6, np.float32: 5} @pytest.mark.parametrize("dtype,nelem", params) def test_sum_of_squares(dtype, nelem): dtype = cudf.dtype(dtype).type data = gen_rand(dtype, nelem) sr = Series(data) df = cudf.DataFrame(sr) got = (sr**2).sum() got_df = (df**2).sum() expect = (data**2).sum() if cudf.dtype(dtype).kind in {"u", "i"}: if 0 <= expect <= np.iinfo(dtype).max: np.testing.assert_array_almost_equal(expect, got) np.testing.assert_array_almost_equal(expect, got_df.iloc[0]) else: print("overflow, passing") else: np.testing.assert_approx_equal( expect, got, significant=accuracy_for_dtype[dtype] ) np.testing.assert_approx_equal( expect, got_df.iloc[0], significant=accuracy_for_dtype[dtype] ) @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(6, 2), Decimal64Dtype(8, 4), Decimal64Dtype(10, 5), Decimal128Dtype(20, 7), Decimal32Dtype(6, 2), ], ) def test_sum_of_squares_decimal(dtype): np.random.seed(0) data = [str(x) for x in gen_rand("int8", 3) / 10] expected = pd.Series([Decimal(x) for x in data]).pow(2).sum() got = 
(cudf.Series(data).astype(dtype) ** 2).sum() assert_eq(expected, got) @pytest.mark.parametrize("dtype,nelem", params) def test_min(dtype, nelem): dtype = cudf.dtype(dtype).type data = gen_rand(dtype, nelem) sr = Series(data) got = sr.min() expect = dtype(data.min()) assert expect == got @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(6, 3), Decimal64Dtype(10, 6), Decimal64Dtype(16, 7), Decimal32Dtype(6, 3), Decimal128Dtype(20, 7), ], ) @pytest.mark.parametrize("nelem", params_sizes) def test_min_decimal(dtype, nelem): np.random.seed(0) data = [str(x) for x in gen_rand("int64", nelem) / 100] expected = pd.Series([Decimal(x) for x in data]).min() got = cudf.Series(data).astype(dtype).min() assert_eq(expected, got) @pytest.mark.parametrize("dtype,nelem", params) def test_max(dtype, nelem): dtype = cudf.dtype(dtype).type data = gen_rand(dtype, nelem) sr = Series(data) got = sr.max() expect = dtype(data.max()) assert expect == got @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(6, 3), Decimal64Dtype(10, 6), Decimal64Dtype(16, 7), Decimal32Dtype(6, 3), Decimal128Dtype(20, 7), ], ) @pytest.mark.parametrize("nelem", params_sizes) def test_max_decimal(dtype, nelem): np.random.seed(0) data = [str(x) for x in gen_rand("int64", nelem) / 100] expected = pd.Series([Decimal(x) for x in data]).max() got = cudf.Series(data).astype(dtype).max() assert_eq(expected, got) @pytest.mark.parametrize("nelem", params_sizes) def test_sum_masked(nelem): dtype = np.float64 data = gen_rand(dtype, nelem) mask = utils.random_bitmask(nelem) bitmask = utils.expand_bits_to_bytes(mask)[:nelem] null_count = utils.count_zero(bitmask) sr = Series.from_masked_array(data, mask, null_count) got = sr.sum() res_mask = np.asarray(bitmask, dtype=np.bool_)[: data.size] expect = data[res_mask].sum() significant = 4 if dtype == np.float32 else 6 np.testing.assert_approx_equal(expect, got, significant=significant) def test_sum_boolean(): s = Series(np.arange(100000)) got = (s > 1).sum(dtype=np.int32) expect = 99998 assert expect == got got = (s > 1).sum(dtype=np.bool_) expect = True assert expect == got def test_date_minmax(): np_data = np.random.normal(size=10**3) gdf_data = Series(np_data) np_casted = np_data.astype("datetime64[ms]") gdf_casted = gdf_data.astype("datetime64[ms]") np_min = np_casted.min() gdf_min = gdf_casted.min() assert np_min == gdf_min np_max = np_casted.max() gdf_max = gdf_casted.max() assert np_max == gdf_max @pytest.mark.parametrize( "op", ["sum", "product", "var", "kurt", "kurtosis", "skew"], ) def test_datetime_unsupported_reductions(op): gsr = cudf.Series([1, 2, 3, None], dtype="datetime64[ns]") psr = gsr.to_pandas() utils.assert_exceptions_equal( lfunc=getattr(psr, op), rfunc=getattr(gsr, op), ) @pytest.mark.parametrize("op", ["product", "var", "kurt", "kurtosis", "skew"]) def test_timedelta_unsupported_reductions(op): gsr = cudf.Series([1, 2, 3, None], dtype="timedelta64[ns]") psr = gsr.to_pandas() utils.assert_exceptions_equal( lfunc=getattr(psr, op), rfunc=getattr(gsr, op), ) @pytest.mark.parametrize("op", ["sum", "product", "std", "var"]) def test_categorical_reductions(op): gsr = cudf.Series([1, 2, 3, None], dtype="category") psr = gsr.to_pandas() utils.assert_exceptions_equal(getattr(psr, op), getattr(gsr, op)) @pytest.mark.parametrize( "data", [ {"a": [1, 2, 3], "b": [10, 11, 12]}, {"a": [1, 0, 3], "b": [10, 11, 12]}, {"a": [1, 2, 3], "b": [10, 11, None]}, { "a": [], }, {}, ], ) @pytest.mark.parametrize("op", ["all", "any"]) def test_any_all_axis_none(data, op): gdf = cudf.DataFrame(data) pdf = 
gdf.to_pandas() expected = getattr(pdf, op)(axis=None) actual = getattr(gdf, op)(axis=None) assert expected == actual @pytest.mark.parametrize( "op", [ "sum", "product", "std", "var", "kurt", "kurtosis", "skew", "min", "max", "mean", "median", ], ) def test_reductions_axis_none_warning(op): df = cudf.DataFrame({"a": [1, 2, 3], "b": [10, 2, 3]}) pdf = df.to_pandas() with pytest.warns(FutureWarning): actual = getattr(df, op)(axis=None) with expect_warning_if( op in {"kurt", "kurtosis", "skew", "min", "max", "mean", "median"}, FutureWarning, ): expected = getattr(pdf, op)(axis=None) assert_eq(expected, actual, check_dtype=False)
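
# --- Editor's illustrative sketch (not part of the original test file) ---
# Hedged example of the reduction semantics tested above, assuming a working
# cudf installation: nulls are skipped by default, and boolean sums honor an
# explicit output dtype as in test_sum_boolean.
import numpy as np

import cudf

s = cudf.Series([1, 2, 3, None])
print(s.sum(), s.min(), s.max())    # 6 1 3
print((s > 1).sum(dtype=np.int32))  # 2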
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_replace.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import re from decimal import Decimal import numpy as np import pandas as pd import pytest import cudf from cudf.core._compat import PANDAS_GE_134, PANDAS_GE_150 from cudf.core.dtypes import Decimal32Dtype, Decimal64Dtype, Decimal128Dtype from cudf.testing._utils import ( INTEGER_TYPES, NUMERIC_TYPES, assert_eq, assert_exceptions_equal, ) @pytest.mark.parametrize( "gsr", [ cudf.Series([5, 1, 2, 3, None, 243, None, 4]), cudf.Series(["one", "two", "three", None, "one"], dtype="category"), cudf.Series(list(range(400)) + [None]), ], ) @pytest.mark.parametrize( "to_replace,value", [ (0, 5), ("one", "two"), ("one", "five"), ("abc", "hello"), ([0, 1], [5, 6]), ([22, 323, 27, 0], -1), ([1, 2, 3], cudf.Series([10, 11, 12])), (cudf.Series([1, 2, 3]), None), ({1: 10, 2: 22}, None), (np.inf, 4), ], ) def test_series_replace_all(gsr, to_replace, value): psr = gsr.to_pandas() gd_to_replace = to_replace if isinstance(to_replace, cudf.Series): pd_to_replace = to_replace.to_pandas() else: pd_to_replace = to_replace gd_value = value if isinstance(value, cudf.Series): pd_value = value.to_pandas() else: pd_value = value actual = gsr.replace(to_replace=gd_to_replace, value=gd_value) if pd_value is None: # TODO: Remove this workaround once cudf # introduces `no_default` values expected = psr.replace(to_replace=pd_to_replace) else: expected = psr.replace(to_replace=pd_to_replace, value=pd_value) assert_eq( expected.sort_values().reset_index(drop=True), actual.sort_values().reset_index(drop=True), ) def test_series_replace(): a1 = np.array([0, 1, 2, 3, 4]) # Numerical a2 = np.array([5, 1, 2, 3, 4]) sr1 = cudf.Series(a1) sr2 = sr1.replace(0, 5) assert_eq(a2, sr2.to_numpy()) # Categorical psr3 = pd.Series(["one", "two", "three"], dtype="category") psr4 = psr3.replace("one", "two") sr3 = cudf.from_pandas(psr3) sr4 = sr3.replace("one", "two") assert_eq( psr4.sort_values().reset_index(drop=True), sr4.sort_values().reset_index(drop=True), ) psr5 = psr3.replace("one", "five") sr5 = sr3.replace("one", "five") assert_eq(psr5, sr5) # List input a6 = np.array([5, 6, 2, 3, 4]) sr6 = sr1.replace([0, 1], [5, 6]) assert_eq(a6, sr6.to_numpy()) with pytest.raises(TypeError): sr1.replace([0, 1], [5.5, 6.5]) # Series input a8 = np.array([5, 5, 5, 3, 4]) sr8 = sr1.replace(sr1[:3].to_numpy(), 5) assert_eq(a8, sr8.to_numpy()) # large input containing null sr9 = cudf.Series(list(range(400)) + [None]) sr10 = sr9.replace([22, 323, 27, 0], None) assert sr10.null_count == 5 assert len(sr10.dropna().to_numpy()) == (401 - 5) sr11 = sr9.replace([22, 323, 27, 0], -1) assert sr11.null_count == 1 assert len(sr11.dropna().to_numpy()) == (401 - 1) # large input not containing nulls sr9 = sr9.fillna(-11) sr12 = sr9.replace([22, 323, 27, 0], None) assert sr12.null_count == 4 assert len(sr12.dropna().to_numpy()) == (401 - 4) sr13 = sr9.replace([22, 323, 27, 0], -1) assert sr13.null_count == 0 assert len(sr13.to_numpy()) == 401 def test_series_replace_with_nulls(): a1 = np.array([0, 1, 2, 3, 4]) # Numerical a2 = np.array([-10, 1, 2, 3, 4]) sr1 = cudf.Series(a1) sr2 = sr1.replace(0, None).fillna(-10) assert_eq(a2, sr2.to_numpy()) # List input a6 = np.array([-10, 6, 2, 3, 4]) sr6 = sr1.replace([0, 1], [None, 6]).fillna(-10) assert_eq(a6, sr6.to_numpy()) sr1 = cudf.Series([0, 1, 2, 3, 4, None]) with pytest.raises(TypeError): sr1.replace([0, 1], [5.5, 6.5]).fillna(-10) # Series input a8 = np.array([-10, -10, -10, 3, 4, -10]) sr8 = sr1.replace(cudf.Series([-10] * 3, index=sr1[:3]), None).fillna(-10) 
assert_eq(a8, sr8.to_numpy()) a9 = np.array([-10, 6, 2, 3, 4, -10]) sr9 = sr1.replace([0, 1], [None, 6]).fillna(-10) assert_eq(a9, sr9.to_numpy()) @pytest.mark.parametrize( "df", [ cudf.DataFrame( { "a": [0, 1, None, 2, 3], "b": [3, 2, 2, 3, None], "c": ["abc", "def", ".", None, None], } ), pytest.param( cudf.DataFrame( { "a": ["one", "two", None, "three"], "b": ["one", None, "two", "three"], }, dtype="category", ), marks=pytest.mark.xfail( condition=not PANDAS_GE_150, reason="https://github.com/pandas-dev/pandas/issues/46672", ), ), cudf.DataFrame( { "col one": [None, 10, 11, None, 1000, 500, 600], "col two": ["abc", "def", "ghi", None, "pp", None, "a"], "a": [0.324, 0.234, 324.342, 23.32, 9.9, None, None], } ), ], ) @pytest.mark.parametrize( "to_replace,value", [ (0, 4), ([0, 1], [4, 5]), ([0, 1], 4), ({"a": 0, "b": 0}, {"a": 4, "b": 5}), ({"a": 0}, {"a": 4}), ("abc", "---"), ([".", "gh"], "hi"), ([".", "def"], ["_", None]), ({"c": 0}, {"a": 4, "b": 5}), ({"a": 2}, {"c": "a"}), ("two", "three"), ([1, 2], pd.Series([10, 11])), (pd.Series([10, 11], index=[3, 2]), None), ( pd.Series(["a+", "+c", "p", "---"], index=["abc", "gh", "l", "z"]), None, ), ( pd.Series([10, 11], index=[3, 2]), {"a": [-10, -30], "l": [-111, -222]}, ), (pd.Series([10, 11], index=[3, 2]), 555), ( pd.Series([10, 11], index=["a", "b"]), pd.Series([555, 1111], index=["a", "b"]), ), ({"a": "2", "b": "3", "zzz": "hi"}, None), ({"a": 2, "b": 3, "zzz": "hi"}, 324353), ( {"a": 2, "b": 3, "zzz": "hi"}, pd.Series([5, 6, 10], index=["a", "b", "col one"]), ), ], ) def test_dataframe_replace(df, to_replace, value): gdf = df pdf = gdf.to_pandas() pd_value = value if isinstance(value, pd.Series): gd_value = cudf.from_pandas(value) else: gd_value = value pd_to_replace = to_replace if isinstance(to_replace, pd.Series): gd_to_replace = cudf.from_pandas(to_replace) else: gd_to_replace = to_replace if pd_value is None: expected = pdf.replace(to_replace=pd_to_replace) else: expected = pdf.replace(to_replace=pd_to_replace, value=pd_value) actual = gdf.replace(to_replace=gd_to_replace, value=gd_value) expected_sorted = expected.sort_values(by=list(expected.columns), axis=0) actual_sorted = actual.sort_values(by=list(actual.columns), axis=0) assert_eq(expected_sorted, actual_sorted) def test_dataframe_replace_with_nulls(): # numerical pdf1 = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, 3]}) gdf1 = cudf.from_pandas(pdf1) pdf2 = pdf1.replace(0, 4) gdf2 = gdf1.replace(0, None).fillna(4) assert_eq(gdf2, pdf2) # list input pdf6 = pdf1.replace([0, 1], [4, 5]) gdf6 = gdf1.replace([0, 1], [4, None]).fillna(5) assert_eq(gdf6, pdf6) pdf7 = pdf1.replace([0, 1], 4) gdf7 = gdf1.replace([0, 1], None).fillna(4) assert_eq(gdf7, pdf7) # dict input: pdf8 = pdf1.replace({"a": 0, "b": 0}, {"a": 4, "b": 5}) gdf8 = gdf1.replace({"a": 0, "b": 0}, {"a": None, "b": 5}).fillna(4) assert_eq(gdf8, pdf8) gdf1 = cudf.DataFrame({"a": [0, 1, 2, 3], "b": [0, 1, 2, None]}) gdf9 = gdf1.replace([0, 1], [4, 5]).fillna(3) assert_eq(gdf9, pdf6) @pytest.mark.parametrize( "psr", [ pd.Series([0, 1, None, 2, None], dtype=pd.Int8Dtype()), pd.Series([0, 1, np.nan, 2, np.nan]), ], ) @pytest.mark.parametrize("data_dtype", NUMERIC_TYPES) @pytest.mark.parametrize("fill_value", [10, pd.Series([10, 20, 30, 40, 50])]) @pytest.mark.parametrize("inplace", [True, False]) def test_series_fillna_numerical(psr, data_dtype, fill_value, inplace): test_psr = psr.copy(deep=True) # TODO: These tests should use Pandas' nullable int type # when we support a recent enough version of Pandas # 
https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html if np.dtype(data_dtype).kind not in ("f") and test_psr.dtype.kind == "i": test_psr = test_psr.astype( cudf.utils.dtypes.np_dtypes_to_pandas_dtypes[np.dtype(data_dtype)] ) gsr = cudf.from_pandas(test_psr) if isinstance(fill_value, pd.Series): fill_value_cudf = cudf.from_pandas(fill_value) else: fill_value_cudf = fill_value expected = test_psr.fillna(fill_value, inplace=inplace) actual = gsr.fillna(fill_value_cudf, inplace=inplace) if inplace: expected = test_psr actual = gsr # TODO: Remove check_dtype when we have support # to compare with pandas nullable dtypes assert_eq(expected, actual, check_dtype=False) @pytest.mark.parametrize( "data", [ [1, None, None, 2, 3, 4], [None, None, 1, 2, None, 3, 4], [1, 2, None, 3, 4, None, None], [0] + [None] * 14, [None] * 14 + [0], ], ) @pytest.mark.parametrize("container", [pd.Series, pd.DataFrame]) @pytest.mark.parametrize("data_dtype", NUMERIC_TYPES) @pytest.mark.parametrize("method", ["ffill", "bfill"]) @pytest.mark.parametrize("inplace", [True, False]) def test_fillna_method_numerical(data, container, data_dtype, method, inplace): if container == pd.DataFrame: data = {"a": data, "b": data, "c": data} pdata = container(data) if np.dtype(data_dtype).kind not in ("f"): data_dtype = cudf.utils.dtypes.np_dtypes_to_pandas_dtypes[ np.dtype(data_dtype) ] pdata = pdata.astype(data_dtype) # Explicitly using nans_as_nulls=True gdata = cudf.from_pandas(pdata, nan_as_null=True) expected = pdata.fillna(method=method, inplace=inplace) actual = gdata.fillna(method=method, inplace=inplace) if inplace: expected = pdata actual = gdata assert_eq(expected, actual, check_dtype=False) @pytest.mark.parametrize( "gsr_data", [ cudf.Series(["2.34", "5.2", "7.47", None, "92.29", None]).astype( Decimal64Dtype(7, 2) ), cudf.Series(["-74.56", None, "-23.73", "34.55", "2.89", None]).astype( Decimal32Dtype(7, 2) ), cudf.Series( ["85.955", np.nan, "-3.243", np.nan, "29.492", np.nan] ).astype(Decimal64Dtype(8, 3)), cudf.Series( ["2.964", None, "57.432", "-989.330", None, "56.444"] ).astype(Decimal64Dtype(8, 3)), cudf.Series( [np.nan, "55.2498", np.nan, "-5.2965", "-28.9423", np.nan] ).astype(Decimal64Dtype(10, 4)), cudf.Series( ["2.964", None, "54347.432", "-989.330", None, "56.444"] ).astype(Decimal128Dtype(20, 7)), ], ) @pytest.mark.parametrize( "fill_value", [ 42, -123, Decimal("8.2"), Decimal("-12.87"), cudf.Series([None, -854, 9533, -274, -845, 7924], dtype="int32"), cudf.Series(["-53.5", "13.4", "-64.3", None, "42.42", None]).astype( Decimal64Dtype(7, 2) ), cudf.Series( ["57.45", np.nan, np.nan, "686.49", "-55.5", "73.24"], ).astype(Decimal64Dtype(7, 2)), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_fillna_decimal(gsr_data, fill_value, inplace): gsr = gsr_data.copy(deep=True) psr = gsr.to_pandas() if isinstance(fill_value, cudf.Series): p_fill_value = fill_value.to_pandas() else: p_fill_value = fill_value expected = psr.fillna(p_fill_value, inplace=inplace) got = gsr.fillna(fill_value, inplace=inplace) assert_eq(expected, got, check_dtype=False) @pytest.mark.parametrize( "psr_data", [ pd.Series(["a", "b", "a", None, "c", None], dtype="category"), pd.Series( ["a", "b", "a", None, "c", None], dtype="category", index=["q", "r", "z", "a", "b", "c"], ), pd.Series( ["a", "b", "a", None, "c", None], dtype="category", index=["x", "t", "p", "q", "r", "z"], ), pd.Series(["a", "b", "a", np.nan, "c", np.nan], dtype="category"), pd.Series( [None, None, None, None, None, None, "a", "b", "c"], 
dtype="category", ), ], ) @pytest.mark.parametrize( "fill_value", [ "c", pd.Series(["c", "c", "c", "c", "c", "a"], dtype="category"), pd.Series( ["a", "b", "a", None, "c", None], dtype="category", index=["x", "t", "p", "q", "r", "z"], ), pd.Series( ["a", "b", "a", None, "c", None], dtype="category", index=["q", "r", "z", "a", "b", "c"], ), pd.Series(["a", "b", "a", None, "c", None], dtype="category"), pd.Series(["a", "b", "a", np.nan, "c", np.nan], dtype="category"), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_fillna_categorical(psr_data, fill_value, inplace): psr = psr_data.copy(deep=True) gsr = cudf.from_pandas(psr) if isinstance(fill_value, pd.Series): fill_value_cudf = cudf.from_pandas(fill_value) else: fill_value_cudf = fill_value if ( isinstance(fill_value_cudf, cudf.Series) and gsr.dtype != fill_value_cudf.dtype ): assert_exceptions_equal( lfunc=psr.fillna, rfunc=gsr.fillna, lfunc_args_and_kwargs=([fill_value], {"inplace": inplace}), rfunc_args_and_kwargs=([fill_value_cudf], {"inplace": inplace}), ) else: expected = psr.fillna(fill_value, inplace=inplace) got = gsr.fillna(fill_value_cudf, inplace=inplace) if inplace: expected = psr got = gsr assert_eq(expected, got) @pytest.mark.parametrize( "psr_data", [ pd.Series(pd.date_range("2010-01-01", "2020-01-10", freq="1y")), pd.Series(["2010-01-01", None, "2011-10-10"], dtype="datetime64[ns]"), pd.Series( [ None, None, None, None, None, None, "2011-10-10", "2010-01-01", "2010-01-02", "2010-01-04", "2010-11-01", ], dtype="datetime64[ns]", ), pd.Series( [ None, None, None, None, None, None, "2011-10-10", "2010-01-01", "2010-01-02", "2010-01-04", "2010-11-01", ], dtype="datetime64[ns]", index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"], ), ], ) @pytest.mark.parametrize( "fill_value", [ pd.Timestamp("2010-01-02"), pd.Series(pd.date_range("2010-01-01", "2020-01-10", freq="1y")) + pd.Timedelta("1d"), pd.Series(["2010-01-01", None, "2011-10-10"], dtype="datetime64[ns]"), pd.Series( [ None, None, None, None, None, None, "2011-10-10", "2010-01-01", "2010-01-02", "2010-01-04", "2010-11-01", ], dtype="datetime64[ns]", ), pd.Series( [ None, None, None, None, None, None, "2011-10-10", "2010-01-01", "2010-01-02", "2010-01-04", "2010-11-01", ], dtype="datetime64[ns]", index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"], ), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_fillna_datetime(psr_data, fill_value, inplace): psr = psr_data.copy(deep=True) gsr = cudf.from_pandas(psr) if isinstance(fill_value, pd.Series): fill_value_cudf = cudf.from_pandas(fill_value) else: fill_value_cudf = fill_value expected = psr.fillna(fill_value, inplace=inplace) got = gsr.fillna(fill_value_cudf, inplace=inplace) if inplace: got = gsr expected = psr assert_eq(expected, got) @pytest.mark.parametrize( "data", [ # Categorical pd.Categorical([1, 2, None, None, 3, 4]), pd.Categorical([None, None, 1, None, 3, 4]), pd.Categorical([1, 2, None, 3, 4, None, None]), pd.Categorical(["1", "20", None, None, "3", "40"]), pd.Categorical([None, None, "10", None, "30", "4"]), pd.Categorical(["1", "20", None, "30", "4", None, None]), # Datetime np.array( [ "2020-01-01 08:00:00", "2020-01-01 09:00:00", None, "2020-01-01 10:00:00", None, "2020-01-01 10:00:00", ], dtype="datetime64[ns]", ), np.array( [ None, None, "2020-01-01 09:00:00", "2020-01-01 10:00:00", None, "2020-01-01 10:00:00", ], dtype="datetime64[ns]", ), np.array( [ "2020-01-01 09:00:00", None, None, "2020-01-01 10:00:00", None, None, ], dtype="datetime64[ns]", ), # 
Timedelta np.array( [10, 100, 1000, None, None, 10, 100, 1000], dtype="datetime64[ns]" ), np.array( [None, None, 10, None, 1000, 100, 10], dtype="datetime64[ns]" ), np.array( [10, 100, None, None, 1000, None, None], dtype="datetime64[ns]" ), # String np.array( ["10", "100", "1000", None, None, "10", "100", "1000"], dtype="object", ), np.array( [None, None, "1000", None, "10", "100", "10"], dtype="object" ), np.array( ["10", "100", None, None, "1000", None, None], dtype="object" ), ], ) @pytest.mark.parametrize("container", [pd.Series, pd.DataFrame]) @pytest.mark.parametrize("method", ["ffill", "bfill"]) @pytest.mark.parametrize("inplace", [True, False]) def test_fillna_method_fixed_width_non_num(data, container, method, inplace): if container == pd.DataFrame: data = {"a": data, "b": data, "c": data} pdata = container(data) # Explicitly using nans_as_nulls=True gdata = cudf.from_pandas(pdata, nan_as_null=True) expected = pdata.fillna(method=method, inplace=inplace) actual = gdata.fillna(method=method, inplace=inplace) if inplace: expected = pdata actual = gdata assert_eq(expected, actual) @pytest.mark.parametrize( "df", [ pd.DataFrame({"a": [1, 2, None], "b": [None, None, 5]}), pd.DataFrame( {"a": [1, 2, None], "b": [None, None, 5]}, index=["a", "p", "z"] ), pd.DataFrame({"a": [1, 2, 3]}), ], ) @pytest.mark.parametrize( "value", [ 10, pd.Series([10, 20, 30]), pd.Series([3, 4, 5]), pd.Series([10, 20, 30], index=["z", "a", "p"]), {"a": 5, "b": pd.Series([3, 4, 5])}, {"a": 5001}, {"b": pd.Series([11, 22, 33], index=["a", "p", "z"])}, {"a": 5, "b": pd.Series([3, 4, 5], index=["a", "p", "z"])}, {"c": 100}, np.nan, ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_fillna_dataframe(df, value, inplace): pdf = df.copy(deep=True) gdf = cudf.from_pandas(pdf) fill_value_pd = value if isinstance(fill_value_pd, (pd.Series, pd.DataFrame)): fill_value_cudf = cudf.from_pandas(fill_value_pd) elif isinstance(fill_value_pd, dict): fill_value_cudf = {} for key in fill_value_pd: temp_val = fill_value_pd[key] if isinstance(temp_val, pd.Series): temp_val = cudf.from_pandas(temp_val) fill_value_cudf[key] = temp_val else: fill_value_cudf = value expect = pdf.fillna(fill_value_pd, inplace=inplace) got = gdf.fillna(fill_value_cudf, inplace=inplace) if inplace: got = gdf expect = pdf assert_eq(expect, got) @pytest.mark.parametrize( "ps_data", [ pd.Series(["a", "b", "c", "d"]), pd.Series([None] * 4, dtype="object"), pd.Series(["z", None, "z", None]), pd.Series(["x", "y", None, None, None]), pd.Series([None, None, None, "i", "P"]), ], ) @pytest.mark.parametrize( "fill_value", [ "a", pd.Series(["a", "b", "c", "d"]), pd.Series(["z", None, "z", None]), pd.Series([None] * 4, dtype="object"), pd.Series(["x", "y", None, None, None]), pd.Series([None, None, None, "i", "P"]), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_fillna_string(ps_data, fill_value, inplace): psr = ps_data.copy(deep=True) gsr = cudf.from_pandas(psr) if isinstance(fill_value, pd.Series): fill_value_cudf = cudf.from_pandas(fill_value) else: fill_value_cudf = fill_value expected = psr.fillna(fill_value, inplace=inplace) got = gsr.fillna(fill_value_cudf, inplace=inplace) if inplace: expected = psr got = gsr assert_eq(expected, got) @pytest.mark.parametrize("data_dtype", INTEGER_TYPES) def test_series_fillna_invalid_dtype(data_dtype): gdf = cudf.Series([1, 2, None, 3], dtype=data_dtype) fill_value = 2.5 with pytest.raises(TypeError) as raises: gdf.fillna(fill_value) raises.match( f"Cannot safely cast non-equivalent" f" 
{type(fill_value).__name__} to {gdf.dtype.type.__name__}" ) @pytest.mark.parametrize("data_dtype", NUMERIC_TYPES) @pytest.mark.parametrize("fill_value", [100, 100.0, 128.5]) def test_series_where(data_dtype, fill_value): psr = pd.Series(list(range(10)), dtype=data_dtype) sr = cudf.from_pandas(psr) if sr.dtype.type(fill_value) != fill_value: with pytest.raises(TypeError): sr.where(sr > 0, fill_value) else: # Cast back to original dtype as pandas automatically upcasts expect = psr.where(psr > 0, fill_value) got = sr.where(sr > 0, fill_value) # pandas returns 'float16' dtype, which is not supported in cudf assert_eq( expect, got, check_dtype=expect.dtype.kind not in ("f"), ) if sr.dtype.type(fill_value) != fill_value: with pytest.raises(TypeError): sr.where(sr < 0, fill_value) else: expect = psr.where(psr < 0, fill_value) got = sr.where(sr < 0, fill_value) # pandas returns 'float16' dtype, which is not supported in cudf assert_eq( expect, got, check_dtype=expect.dtype.kind not in ("f"), ) if sr.dtype.type(fill_value) != fill_value: with pytest.raises(TypeError): sr.where(sr == 0, fill_value) else: expect = psr.where(psr == 0, fill_value) got = sr.where(sr == 0, fill_value) # pandas returns 'float16' dtype, which is not supported in cudf assert_eq( expect, got, check_dtype=expect.dtype.kind not in ("f"), ) @pytest.mark.parametrize("fill_value", [100, 100.0, 100.5]) def test_series_with_nulls_where(fill_value): psr = pd.Series([None] * 3 + list(range(5))) sr = cudf.from_pandas(psr) expect = psr.where(psr > 0, fill_value) got = sr.where(sr > 0, fill_value) assert_eq(expect, got) expect = psr.where(psr < 0, fill_value) got = sr.where(sr < 0, fill_value) assert_eq(expect, got) expect = psr.where(psr == 0, fill_value) got = sr.where(sr == 0, fill_value) assert_eq(expect, got) @pytest.mark.parametrize("fill_value", [[888, 999]]) def test_dataframe_with_nulls_where_with_scalars(fill_value): pdf = pd.DataFrame( { "A": [-1, 2, -3, None, 5, 6, -7, 0], "B": [4, -2, 3, None, 7, 6, 8, 0], } ) gdf = cudf.from_pandas(pdf) expect = pdf.where(pdf % 3 == 0, fill_value) got = gdf.where(gdf % 3 == 0, fill_value) assert_eq(expect, got) def test_dataframe_with_different_types(): # Testing for int and float pdf = pd.DataFrame( {"A": [111, 22, 31, 410, 56], "B": [-10.12, 121.2, 45.7, 98.4, 87.6]} ) gdf = cudf.from_pandas(pdf) expect = pdf.where(pdf > 50, -pdf) got = gdf.where(gdf > 50, -gdf) assert_eq(expect, got) # Testing for string pdf = pd.DataFrame({"A": ["a", "bc", "cde", "fghi"]}) gdf = cudf.from_pandas(pdf) pdf_mask = pd.DataFrame({"A": [True, False, True, False]}) gdf_mask = cudf.from_pandas(pdf_mask) expect = pdf.where(pdf_mask, ["cudf"]) got = gdf.where(gdf_mask, ["cudf"]) assert_eq(expect, got) # Testing for categoriacal pdf = pd.DataFrame({"A": ["a", "b", "b", "c"]}) pdf["A"] = pdf["A"].astype("category") gdf = cudf.from_pandas(pdf) expect = pdf.where(pdf_mask, "c") got = gdf.where(gdf_mask, ["c"]) assert_eq(expect, got) def test_dataframe_where_with_different_options(): pdf = pd.DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]}) gdf = cudf.from_pandas(pdf) # numpy array boolean_mask = np.array([[False, True], [True, False], [False, True]]) expect = pdf.where(boolean_mask, -pdf) got = gdf.where(boolean_mask, -gdf) assert_eq(expect, got) # with single scalar expect = pdf.where(boolean_mask, 8) got = gdf.where(boolean_mask, 8) assert_eq(expect, got) # with multi scalar expect = pdf.where(boolean_mask, [8, 9]) got = gdf.where(boolean_mask, [8, 9]) assert_eq(expect, got) def test_series_multiple_times_with_nulls(): 
sr = cudf.Series([1, 2, 3, None]) expected = cudf.Series([None, None, None, None], dtype=np.int64) for i in range(3): got = sr.replace([1, 2, 3], None) assert_eq(expected, got) # BUG: #2695 # The following series will acquire a chunk of memory and update with # values, but these values may still linger even after the memory # gets released. This memory space might get used for replace in # subsequent calls and the memory used for mask may have junk values. # So, if it is not updated properly, the result would be wrong. # So, this will help verify that scenario. cudf.Series([1, 1, 1, None]) @pytest.mark.parametrize("series_dtype", NUMERIC_TYPES) @pytest.mark.parametrize( "replacement", [128, 128.0, 128.5, 32769, 32769.0, 32769.5] ) def test_numeric_series_replace_dtype(series_dtype, replacement): psr = pd.Series([0, 1, 2, 3, 4, 5], dtype=series_dtype) sr = cudf.from_pandas(psr) if sr.dtype.kind in "ui": can_replace = np.array([replacement])[0].is_integer() and np.can_cast( int(replacement), sr.dtype ) else: can_replace = np.can_cast(replacement, sr.dtype) # Both Scalar if not can_replace: with pytest.raises(TypeError): sr.replace(1, replacement) else: expect = psr.replace(1, replacement).astype(psr.dtype) got = sr.replace(1, replacement) assert_eq(expect, got) # to_replace is a list, replacement is a scalar if not can_replace: with pytest.raises(TypeError): sr.replace([2, 3], replacement) else: expect = psr.replace([2, 3], replacement).astype(psr.dtype) got = sr.replace([2, 3], replacement) assert_eq(expect, got) # If to_replace is a scalar and replacement is a list with pytest.raises(TypeError): sr.replace(0, [replacement, 2]) # Both list of unequal length with pytest.raises(ValueError): sr.replace([0, 1], [replacement]) # Both lists of equal length if ( np.dtype(type(replacement)).kind == "f" and sr.dtype.kind in {"i", "u"} ) or (not can_replace): with pytest.raises(TypeError): sr.replace([2, 3], [replacement, replacement]) else: expect = psr.replace([2, 3], [replacement, replacement]).astype( psr.dtype ) got = sr.replace([2, 3], [replacement, replacement]) assert_eq(expect, got) @pytest.mark.parametrize( "pframe, replace_args", [ ( pd.Series([5, 1, 2, 3, 4]), {"to_replace": 5, "value": 0, "inplace": True}, ), ( pd.Series([5, 1, 2, 3, 4]), {"to_replace": {5: 0, 3: -5}, "inplace": True}, ), (pd.Series([5, 1, 2, 3, 4]), {}), pytest.param( pd.Series(["one", "two", "three"], dtype="category"), {"to_replace": "one", "value": "two", "inplace": True}, marks=pytest.mark.xfail( condition=not PANDAS_GE_134, reason="https://github.com/pandas-dev/pandas/issues/43232", ), ), ( pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [5, 6, 7, 8, 9]}), {"to_replace": 5, "value": 0, "inplace": True}, ), ( pd.Series([1, 2, 3, 45]), { "to_replace": np.array([]).astype(int), "value": 77, "inplace": True, }, ), ( pd.Series([1, 2, 3, 45]), { "to_replace": np.array([]).astype(int), "value": 77, "inplace": False, }, ), ( pd.DataFrame({"a": [1, 2, 3, 4, 5, 666]}), {"to_replace": {"a": 2}, "value": {"a": -33}, "inplace": True}, ), ( pd.DataFrame({"a": [1, 2, 3, 4, 5, 666]}), { "to_replace": {"a": [2, 5]}, "value": {"a": [9, 10]}, "inplace": True, }, ), ( pd.DataFrame({"a": [1, 2, 3, 4, 5, 666]}), {"to_replace": [], "value": [], "inplace": True}, ), ], ) def test_replace_inplace(pframe, replace_args): gpu_frame = cudf.from_pandas(pframe) pandas_frame = pframe.copy() gpu_copy = gpu_frame.copy() cpu_copy = pandas_frame.copy() assert_eq(gpu_frame, pandas_frame) assert_eq(gpu_copy, cpu_copy) gpu_frame.replace(**replace_args) 
pandas_frame.replace(**replace_args) assert_eq(gpu_frame, pandas_frame) assert_eq(gpu_copy, cpu_copy) def test_replace_df_error(): pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 666]}) gdf = cudf.from_pandas(pdf) assert_exceptions_equal( lfunc=pdf.replace, rfunc=gdf.replace, lfunc_args_and_kwargs=([], {"to_replace": -1, "value": []}), rfunc_args_and_kwargs=([], {"to_replace": -1, "value": []}), ) @pytest.mark.parametrize( ("lower", "upper"), [ ([2, 7.4], [4, 7.9]), ([2, 7.4], None), ( None, [4, 7.9], ), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_dataframe_clip(lower, upper, inplace): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5], "b": [7.1, 7.24, 7.5, 7.8, 8.11]} ) gdf = cudf.from_pandas(pdf) got = gdf.clip(lower=lower, upper=upper, inplace=inplace) expect = pdf.clip(lower=lower, upper=upper, axis=1) if inplace is True: assert_eq(expect, gdf) else: assert_eq(expect, got) @pytest.mark.parametrize( ("lower", "upper"), [("b", "d"), ("b", None), (None, "c"), (None, None)], ) @pytest.mark.parametrize("inplace", [True, False]) def test_dataframe_category_clip(lower, upper, inplace): data = ["a", "b", "c", "d", "e"] pdf = pd.DataFrame({"a": data}) gdf = cudf.from_pandas(pdf) gdf["a"] = gdf["a"].astype("category") expect = pdf.clip(lower=lower, upper=upper) got = gdf.clip(lower=lower, upper=upper, inplace=inplace) if inplace is True: assert_eq(expect, gdf.astype("str")) else: assert_eq(expect, got.astype("str")) @pytest.mark.parametrize( ("lower", "upper"), [([2, 7.4], [4, 7.9, "d"]), ([2, 7.4, "a"], [4, 7.9, "d"])], ) def test_dataframe_exceptions_for_clip(lower, upper): gdf = cudf.DataFrame( {"a": [1, 2, 3, 4, 5], "b": [7.1, 7.24, 7.5, 7.8, 8.11]} ) with pytest.raises(ValueError): gdf.clip(lower=lower, upper=upper) @pytest.mark.parametrize( ("data", "lower", "upper"), [ ([1, 2, 3, 4, 5], 2, 4), ([1, 2, 3, 4, 5], 2, None), ([1, 2, 3, 4, 5], None, 4), ([1, 2, 3, 4, 5], None, None), ([1, 2, 3, 4, 5], 4, 2), ([1.0, 2.0, 3.0, 4.0, 5.0], 4, 2), (pd.Series([1, 2, 3, 4, 5], dtype="int32"), 4, 2), (["a", "b", "c", "d", "e"], "b", "d"), (["a", "b", "c", "d", "e"], "b", None), (["a", "b", "c", "d", "e"], None, "d"), (["a", "b", "c", "d", "e"], "d", "b"), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_series_clip(data, lower, upper, inplace): psr = pd.Series(data) gsr = cudf.from_pandas(psr) expect = psr.clip(lower=lower, upper=upper) got = gsr.clip(lower=lower, upper=upper, inplace=inplace) if inplace is True: assert_eq(expect, gsr) else: assert_eq(expect, got) def test_series_exceptions_for_clip(): with pytest.raises(ValueError): cudf.Series([1, 2, 3, 4]).clip([1, 2], [2, 3]) with pytest.raises(NotImplementedError): cudf.Series([1, 2, 3, 4]).clip(1, 2, axis=0) @pytest.mark.parametrize( "data", [[1, 2.0, 3, 4, None, 1, None, 10, None], ["a", "b", "c"]] ) @pytest.mark.parametrize( "index", [ None, [1, 2, 3], ["a", "b", "z"], ["a", "b", "c", "d", "e", "f", "g", "l", "m"], ], ) @pytest.mark.parametrize("value", [[1, 2, 3, 4, None, 1, None, 10, None]]) def test_series_fillna(data, index, value): psr = pd.Series( data, index=index if index is not None and len(index) == len(data) else None, ) gsr = cudf.Series( data, index=index if index is not None and len(index) == len(data) else None, ) expect = psr.fillna(pd.Series(value)) got = gsr.fillna(cudf.Series(value)) assert_eq(expect, got) def test_series_fillna_error(): psr = pd.Series([1, 2, None, 3, None]) gsr = cudf.from_pandas(psr) assert_exceptions_equal( psr.fillna, gsr.fillna, ([pd.DataFrame({"a": [1, 2, 3]})],), 
([cudf.DataFrame({"a": [1, 2, 3]})],), ) def test_series_replace_errors(): gsr = cudf.Series([1, 2, None, 3, None]) psr = gsr.to_pandas() with pytest.raises( TypeError, match=re.escape( "to_replace and value should be of same types," "got to_replace dtype: int64 and " "value dtype: object" ), ): gsr.replace(1, "a") gsr = cudf.Series(["a", "b", "c"]) with pytest.raises( TypeError, match=re.escape( "to_replace and value should be of same types," "got to_replace dtype: int64 and " "value dtype: object" ), ): gsr.replace([1, 2], ["a", "b"]) assert_exceptions_equal( psr.replace, gsr.replace, ([{"a": 1}, 1],), ([{"a": 1}, 1],), ) assert_exceptions_equal( lfunc=psr.replace, rfunc=gsr.replace, lfunc_args_and_kwargs=([[1, 2], [1]],), rfunc_args_and_kwargs=([[1, 2], [1]],), ) assert_exceptions_equal( lfunc=psr.replace, rfunc=gsr.replace, lfunc_args_and_kwargs=([object(), [1]],), rfunc_args_and_kwargs=([object(), [1]],), ) assert_exceptions_equal( lfunc=psr.replace, rfunc=gsr.replace, lfunc_args_and_kwargs=([{"a": 1}, object()],), rfunc_args_and_kwargs=([{"a": 1}, object()],), ) @pytest.mark.parametrize( "gsr,old,new,expected", [ ( cudf.Series(["a", "b", "c", None]), None, "a", cudf.Series(["a", "b", "c", "a"]), ), ( cudf.Series(["a", "b", "c", None]), [None, "a", "a"], ["c", "b", "d"], cudf.Series(["d", "b", "c", "c"]), ), ( cudf.Series(["a", "b", "c", None]), [None, "a"], ["b", None], cudf.Series([None, "b", "c", "b"]), ), ( cudf.Series(["a", "b", "c", None]), [None, None], [None, None], cudf.Series(["a", "b", "c", None]), ), (cudf.Series([1, 2, None, 3]), None, 10, cudf.Series([1, 2, 10, 3])), ( cudf.Series([1, 2, None, 3]), [None, 1, 1], [3, 2, 4], cudf.Series([4, 2, 3, 3]), ), ( cudf.Series([1, 2, None, 3]), [None, 1], [2, None], cudf.Series([None, 2, 2, 3]), ), ( cudf.Series(["a", "q", "t", None], dtype="category"), None, "z", cudf.Series(["a", "q", "t", "z"], dtype="category"), ), ( cudf.Series(["a", "q", "t", None], dtype="category"), [None, "a", "q"], ["z", None, None], cudf.Series([None, None, "t", "z"], dtype="category"), ), ( cudf.Series(["a", None, "t", None], dtype="category"), [None, "t"], ["p", None], cudf.Series(["a", "p", None, "p"], dtype="category"), ), ], ) def test_replace_nulls(gsr, old, new, expected): actual = gsr.replace(old, new) assert_eq( expected.sort_values().reset_index(drop=True), actual.sort_values().reset_index(drop=True), ) def test_fillna_columns_multiindex(): columns = pd.MultiIndex.from_tuples([("a", "b"), ("d", "e")]) pdf = pd.DataFrame( {"0": [1, 2, None, 3, None], "1": [None, None, None, None, 4]} ) pdf.columns = columns gdf = cudf.from_pandas(pdf) expected = pdf.fillna(10) actual = gdf.fillna(10) assert_eq(expected, actual)
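

# A minimal standalone sketch (not part of the upstream test suite) that
# summarizes the fillna/replace semantics the tests above exercise. The
# sample values below are illustrative assumptions, not cases from the
# suite.
def _fillna_replace_sketch():
    import cudf

    s = cudf.Series([1, None, 3, None], dtype="int64")
    # Scalar fill: every null becomes 10.
    filled_scalar = s.fillna(10)
    # A Series fill value is aligned with the target, so only the null
    # positions consult it; non-null values are kept as-is.
    filled_series = s.fillna(cudf.Series([0, 20, 0, 40]))
    # replace(old, new) substitutes matching values and leaves nulls alone.
    replaced_scalar = s.replace(3, 30)
    # to_replace/value may also be parallel lists of equal length.
    replaced_list = s.replace([1, 3], [100, 300])
    return filled_scalar, filled_series, replaced_scalar, replaced_list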
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_search.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import cupy import numpy as np import pandas as pd import pytest import cudf from cudf.testing._utils import assert_eq, gen_rand, random_bitmask @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("obj_class", ["series", "index", "column"]) @pytest.mark.parametrize("vals_class", ["series", "index"]) def test_searchsorted(side, obj_class, vals_class): nelem = 1000 column_data = gen_rand("float64", nelem) column_mask = random_bitmask(nelem) values_data = gen_rand("float64", nelem) values_mask = random_bitmask(nelem) sr = cudf.Series.from_masked_array(column_data, column_mask) vals = cudf.Series.from_masked_array(values_data, values_mask) sr = sr.sort_values() # Reference object can be Series, Index, or Column if obj_class == "index": sr.reset_index(drop=True) elif obj_class == "column": sr = sr._column # Values can be Series or Index if vals_class == "index": vals.reset_index(drop=True) psr = sr.to_pandas() pvals = vals.to_pandas() expect = psr.searchsorted(pvals, side) if obj_class == "column": got = sr.searchsorted(vals._column, side) else: got = sr.searchsorted(vals, side) assert_eq(expect, cupy.asnumpy(got)) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("multiindex", [True, False]) def test_searchsorted_dataframe(side, multiindex): values = cudf.DataFrame( { "a": [1, 0, 5, 1], "b": [-0.998, 0.031, -0.888, -0.998], "c": ["C", "A", "G", "B"], } ) base = cudf.DataFrame( { "a": [1, 1, 1, 5], "b": [-0.999, -0.998, -0.997, -0.888], "c": ["A", "C", "E", "G"], } ) if multiindex: base = base.set_index(["a", "b", "c"]).index values = values.set_index(["a", "b", "c"]).index result = base.searchsorted(values, side=side).tolist() if side == "left": assert result == [1, 0, 3, 1] else: assert result == [2, 0, 4, 1] def test_search_sorted_dataframe_unequal_number_of_columns(): values = cudf.DataFrame({"a": [1, 0, 5, 1]}) base = cudf.DataFrame({"a": [1, 0, 5, 1], "b": ["x", "z", "w", "a"]}) with pytest.raises(ValueError, match="Mismatch number of columns"): base.searchsorted(values) @pytest.mark.parametrize("side", ["left", "right"]) def test_searchsorted_categorical(side): cat1 = pd.Categorical( ["a", "a", "b", "c", "a"], categories=["a", "b", "c"], ordered=True ) psr1 = pd.Series(cat1).sort_values() sr1 = cudf.Series(cat1).sort_values() cat2 = pd.Categorical( ["a", "b", "a", "c", "b"], categories=["a", "b", "c"], ordered=True ) psr2 = pd.Series(cat2) sr2 = cudf.Series(cat2) expect = psr1.searchsorted(psr2, side) got = sr1.searchsorted(sr2, side) assert_eq(expect, cupy.asnumpy(got)) @pytest.mark.parametrize("side", ["left", "right"]) def test_searchsorted_datetime(side): psr1 = pd.Series( pd.date_range("20190101", "20200101", freq="400h", name="times") ) sr1 = cudf.from_pandas(psr1) psr2 = pd.Series( np.array( [ np.datetime64("2019-11-20"), np.datetime64("2019-04-15"), np.datetime64("2019-02-20"), np.datetime64("2019-05-31"), np.datetime64("2020-01-02"), ] ) ) sr2 = cudf.from_pandas(psr2) expect = psr1.searchsorted(psr2, side) got = sr1.searchsorted(sr2, side) assert_eq(expect, cupy.asnumpy(got)) def test_searchsorted_misc(): psr = pd.Series([1, 2, 3.4, 6]) sr = cudf.from_pandas(psr) assert_eq(psr.searchsorted(1), sr.searchsorted(1)) assert_eq(psr.searchsorted(0), sr.searchsorted(0)) assert_eq(psr.searchsorted(4), sr.searchsorted(4)) assert_eq(psr.searchsorted(5), sr.searchsorted(5)) assert_eq( psr.searchsorted([-100, 3.4, 2.2, 2.0, 2.000000001]), sr.searchsorted([-100, 3.4, 2.2, 2.0, 2.000000001]), ) 
    psr = pd.Series([1, 2, 3])
    sr = cudf.from_pandas(psr)

    assert_eq(psr.searchsorted(1), sr.searchsorted(1))
    assert_eq(
        psr.searchsorted([0, 1, 2, 3, 4, -4, -3, -2, -1, 0, -120]),
        sr.searchsorted([0, 1, 2, 3, 4, -4, -3, -2, -1, 0, -120]),
    )
    assert_eq(psr.searchsorted(1.5), sr.searchsorted(1.5))
    assert_eq(psr.searchsorted(1.99), sr.searchsorted(1.99))
    assert_eq(psr.searchsorted(3.00001), sr.searchsorted(3.00001))
    assert_eq(
        psr.searchsorted([-100, 3.00001, 2.2, 2.0, 2.000000001]),
        sr.searchsorted([-100, 3.00001, 2.2, 2.0, 2.000000001]),
    )


@pytest.mark.xfail(reason="https://github.com/pandas-dev/pandas/issues/54668")
def test_searchsorted_mixed_str_int():
    # Looking up a string in an integer series should raise a ValueError in
    # both libraries; the xfail tracks the linked pandas issue where pandas
    # fails to raise.
    psr = pd.Series([1, 2, 3], dtype="int")
    sr = cudf.from_pandas(psr)

    with pytest.raises(ValueError):
        sr.searchsorted("a")

    with pytest.raises(ValueError):
        psr.searchsorted("a")
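

# A minimal standalone sketch (not part of the upstream test suite) of the
# side="left"/"right" distinction the tests above rely on. The sample
# values are illustrative assumptions.
def _searchsorted_sketch():
    import cudf

    s = cudf.Series([1, 2, 2, 3])  # searchsorted assumes sorted input
    # side="left" gives the first index where 2 could be inserted (1 here);
    # side="right" gives one past the last equal element (3 here).
    left = s.searchsorted(2, side="left")
    right = s.searchsorted(2, side="right")
    # Vectorized lookups return one insertion point per query value.
    many = s.searchsorted([0, 2, 4], side="left")
    return left, right, many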
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_quantiles.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import re import pandas as pd import pytest import cudf from cudf.testing._utils import assert_eq, assert_exceptions_equal def test_single_q(): q = 0.5 pdf = pd.DataFrame({"a": [4, 24, 13, 8, 7]}) gdf = cudf.from_pandas(pdf) pdf_q = pdf.quantile(q, interpolation="nearest") gdf_q = gdf.quantile(q, interpolation="nearest", method="table") assert_eq(pdf_q, gdf_q, check_index_type=False) def test_with_index(): q = [0, 0.5, 1] pdf = pd.DataFrame({"a": [7, 4, 4, 9, 13]}, index=[0, 4, 3, 2, 7]) gdf = cudf.from_pandas(pdf) pdf_q = pdf.quantile(q, interpolation="nearest") gdf_q = gdf.quantile(q, interpolation="nearest", method="table") assert_eq(pdf_q, gdf_q, check_index_type=False) def test_with_multiindex(): q = [0, 0.5, 1] pdf = pd.DataFrame( { "index_1": [3, 1, 9, 7, 5], "index_2": [2, 4, 3, 5, 1], "a": [8, 4, 2, 3, 8], } ) pdf.set_index(["index_1", "index_2"], inplace=True) gdf = cudf.from_pandas(pdf) pdf_q = pdf.quantile(q, interpolation="nearest") gdf_q = gdf.quantile(q, interpolation="nearest", method="table") assert_eq(pdf_q, gdf_q, check_index_type=False) @pytest.mark.parametrize("q", [2, [1, 2, 3]]) def test_quantile_range_error(q): ps = pd.Series([1, 2, 3]) gs = cudf.from_pandas(ps) assert_exceptions_equal( lfunc=ps.quantile, rfunc=gs.quantile, lfunc_args_and_kwargs=([q],), rfunc_args_and_kwargs=([q],), ) def test_quantile_q_type(): gs = cudf.Series([1, 2, 3]) with pytest.raises( TypeError, match=re.escape( "q must be a scalar or array-like, got <class " "'cudf.core.dataframe.DataFrame'>" ), ): gs.quantile(cudf.DataFrame()) @pytest.mark.parametrize( "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"] ) def test_quantile_type_int_float(interpolation): data = [1, 3, 4] psr = pd.Series(data) gsr = cudf.Series(data) expected = psr.quantile(0.5, interpolation=interpolation) actual = gsr.quantile(0.5, interpolation=interpolation) assert expected == actual assert type(expected) == type(actual)
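

# A minimal standalone sketch (not part of the upstream test suite) of the
# quantile options exercised above. The frame contents are illustrative
# assumptions.
def _quantile_sketch():
    import cudf

    df = cudf.DataFrame({"a": [4, 24, 13, 8, 7]})
    # interpolation="nearest" snaps to an existing value instead of
    # interpolating between neighbors.
    single = df.quantile(0.5, interpolation="nearest")
    # method="table" evaluates the quantile row-wise over the whole table,
    # which is what the tests above compare against pandas.
    table = df.quantile([0, 0.5, 1], interpolation="nearest", method="table")
    return single, table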
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_dlpack.py
# Copyright (c) 2019-2022, NVIDIA CORPORATION. import itertools from contextlib import ExitStack as does_not_raise import cupy import numpy as np import pytest from packaging import version import cudf from cudf.testing._utils import assert_eq nelems = [0, 3, 10] dtype = [np.uint16, np.int32, np.float64] nulls = ["some", "none"] params_1d = itertools.product(nelems, dtype, nulls) ncols = [0, 1, 2] params_2d = itertools.product(ncols, nelems, dtype, nulls) if version.parse(cupy.__version__) < version.parse("10"): # fromDlpack deprecated in cupy version 10, replaced by from_dlpack cupy_from_dlpack = cupy.fromDlpack else: cupy_from_dlpack = cupy.from_dlpack def data_size_expectation_builder(data, nan_null_param=False): if nan_null_param and np.isnan(data).any(): return pytest.raises((ValueError,)) if len(data.shape) == 2 and data.size == 0: return pytest.raises((ValueError, IndexError)) else: return does_not_raise() @pytest.fixture(params=params_1d) def data_1d(request): nelems = request.param[0] dtype = request.param[1] nulls = request.param[2] a = np.random.randint(10, size=nelems).astype(dtype) if nulls == "some" and a.size != 0 and np.issubdtype(dtype, np.floating): idx = np.random.choice(a.size, size=int(a.size * 0.2), replace=False) a[idx] = np.nan return a @pytest.fixture(params=params_2d) def data_2d(request): ncols = request.param[0] nrows = request.param[1] dtype = request.param[2] nulls = request.param[3] a = np.random.randint(10, size=(nrows, ncols)).astype(dtype) if nulls == "some" and a.size != 0 and np.issubdtype(dtype, np.floating): idx = np.random.choice(a.size, size=int(a.size * 0.2), replace=False) a.ravel()[idx] = np.nan return np.ascontiguousarray(a) def test_to_dlpack_dataframe(data_2d): expectation = data_size_expectation_builder(data_2d) with expectation: gdf = cudf.DataFrame.from_records(data_2d) dlt = gdf.to_dlpack() # PyCapsules are a C-API thing so couldn't come up with a better way assert str(type(dlt)) == "<class 'PyCapsule'>" def test_to_dlpack_series(data_1d): expectation = data_size_expectation_builder(data_1d, nan_null_param=False) with expectation: gs = cudf.Series(data_1d, nan_as_null=False) dlt = gs.to_dlpack() # PyCapsules are a C-API thing so couldn't come up with a better way assert str(type(dlt)) == "<class 'PyCapsule'>" def test_to_dlpack_series_null(data_1d): expectation = data_size_expectation_builder(data_1d, nan_null_param=True) with expectation: gs = cudf.Series(data_1d, nan_as_null=True) dlt = gs.to_dlpack() # PyCapsules are a C-API thing so couldn't come up with a better way assert str(type(dlt)) == "<class 'PyCapsule'>" def test_to_dlpack_index(data_1d): expectation = data_size_expectation_builder(data_1d) with expectation: if np.isnan(data_1d).any(): pytest.skip("Nulls not allowed in Index") gi = cudf.core.index.as_index(data_1d) dlt = gi.to_dlpack() # PyCapsules are a C-API thing so couldn't come up with a better way assert str(type(dlt)) == "<class 'PyCapsule'>" def test_to_dlpack_cupy_1d(data_1d): expectation = data_size_expectation_builder(data_1d, False) with expectation: gs = cudf.Series(data_1d, nan_as_null=False) cudf_host_array = gs.to_numpy(na_value=np.nan) dlt = gs.to_dlpack() cupy_array = cupy_from_dlpack(dlt) cupy_host_array = cupy_array.get() assert_eq(cudf_host_array, cupy_host_array) def test_to_dlpack_cupy_2d(data_2d): expectation = data_size_expectation_builder(data_2d) with expectation: gdf = cudf.DataFrame.from_records(data_2d) cudf_host_array = np.array(gdf.to_pandas()).flatten() dlt = gdf.to_dlpack() cupy_array = 
cupy_from_dlpack(dlt) cupy_host_array = cupy_array.get().flatten() assert_eq(cudf_host_array, cupy_host_array) def test_from_dlpack_cupy_1d(data_1d): cupy_array = cupy.array(data_1d) cupy_host_array = cupy_array.get() dlt = cupy_array.toDlpack() gs = cudf.from_dlpack(dlt) cudf_host_array = gs.to_numpy(na_value=np.nan) assert_eq(cudf_host_array, cupy_host_array) def test_from_dlpack_cupy_2d(data_2d): cupy_array = cupy.array(data_2d, order="F") cupy_host_array = cupy_array.get().flatten() dlt = cupy_array.toDlpack() gdf = cudf.from_dlpack(dlt) cudf_host_array = np.array(gdf.to_pandas()).flatten() assert_eq(cudf_host_array, cupy_host_array) def test_to_dlpack_cupy_2d_null(data_2d): expectation = data_size_expectation_builder(data_2d, nan_null_param=True) with expectation: gdf = cudf.DataFrame.from_records(data_2d, nan_as_null=True) cudf_host_array = np.array(gdf.to_pandas()).flatten() dlt = gdf.to_dlpack() cupy_array = cupy_from_dlpack(dlt) cupy_host_array = cupy_array.get().flatten() assert_eq(cudf_host_array, cupy_host_array) def test_to_dlpack_cupy_1d_null(data_1d): expectation = data_size_expectation_builder(data_1d, nan_null_param=True) with expectation: gs = cudf.Series(data_1d) cudf_host_array = gs.to_numpy(na_value=np.nan) dlt = gs.to_dlpack() cupy_array = cupy_from_dlpack(dlt) cupy_host_array = cupy_array.get() assert_eq(cudf_host_array, cupy_host_array) def test_to_dlpack_mixed_dtypes(): df = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [10.32, 0.4, -0.2, -1000.32]}) cudf_host_array = df.to_numpy() dlt = df.to_dlpack() cupy_array = cupy_from_dlpack(dlt) cupy_host_array = cupy_array.get() assert_eq(cudf_host_array, cupy_host_array) @pytest.mark.parametrize( "shape", [ (0, 3), pytest.param( (3, 0), marks=pytest.mark.xfail( reason="Index information not available via from_dlpack" ), ), (0, 0), ], ) def test_from_dlpack_zero_sizes(shape): arr = cupy.empty(shape, dtype=float) df = cudf.io.dlpack.from_dlpack(arr.__dlpack__()) assert_eq(df, cudf.DataFrame(arr))
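

# A minimal standalone sketch (not part of the upstream test suite) of the
# cupy <-> cudf DLPack round trip the tests above cover. It leans on this
# module's imports and its `cupy_from_dlpack` version shim; the array
# values are illustrative assumptions.
def _dlpack_roundtrip_sketch():
    arr = cupy.arange(5, dtype="float64")
    # cupy -> cudf: hand cudf the producer's DLPack capsule.
    ser = cudf.from_dlpack(arr.__dlpack__())
    # cudf -> cupy: a DLPack capsule may be consumed exactly once, so keep
    # a fresh capsule per consumer.
    capsule = ser.to_dlpack()
    back = cupy_from_dlpack(capsule)
    return ser, back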
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_datetime.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION. import datetime import operator import cupy as cp import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf import cudf.testing.dataset_generator as dataset_generator from cudf import DataFrame, Series from cudf.core._compat import PANDAS_GE_150, PANDAS_LT_140 from cudf.core.index import DatetimeIndex from cudf.testing._utils import ( DATETIME_TYPES, NUMERIC_TYPES, assert_eq, assert_exceptions_equal, expect_warning_if, ) _cmpops = [ operator.lt, operator.gt, operator.le, operator.ge, operator.eq, operator.ne, ] def data1(): return pd.date_range("20010101", "20020215", freq="400h", name="times") def data2(): return pd.date_range( "20010101", freq="243434324423423234N", name="times", periods=10 ) def timeseries_us_data(): return pd.date_range( "2019-07-16 00:00:00", "2019-07-16 00:00:01", freq="5555us", name="times", ) def timestamp_ms_data(): return pd.Series( [ "2019-07-16 00:00:00.333", "2019-07-16 00:00:00.666", "2019-07-16 00:00:00.888", ] ) def timestamp_us_data(): return pd.Series( [ "2019-07-16 00:00:00.333333", "2019-07-16 00:00:00.666666", "2019-07-16 00:00:00.888888", ] ) def timestamp_ns_data(): return pd.Series( [ "2019-07-16 00:00:00.333333333", "2019-07-16 00:00:00.666666666", "2019-07-16 00:00:00.888888888", ] ) def numerical_data(): return np.arange(1, 10) fields = [ "year", "month", "day", "hour", "minute", "second", "microsecond", "nanosecond", "weekday", "dayofweek", "dayofyear", "day_of_year", ] @pytest.mark.parametrize("data", [data1(), data2()]) def test_series(data): pd_data = pd.Series(data.copy()) gdf_data = Series(pd_data) assert_eq(pd_data, gdf_data) @pytest.mark.parametrize( "lhs_dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) @pytest.mark.parametrize( "rhs_dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_datetime_series_binops_pandas(lhs_dtype, rhs_dtype): pd_data_1 = pd.Series( pd.date_range("20010101", "20020215", freq="400h", name="times") ) pd_data_2 = pd.Series( pd.date_range("20010101", "20020215", freq="401h", name="times") ) gdf_data_1 = Series(pd_data_1).astype(lhs_dtype) gdf_data_2 = Series(pd_data_2).astype(rhs_dtype) assert_eq(pd_data_1, gdf_data_1.astype("datetime64[ns]")) assert_eq(pd_data_2, gdf_data_2.astype("datetime64[ns]")) assert_eq(pd_data_1 < pd_data_2, gdf_data_1 < gdf_data_2) assert_eq(pd_data_1 > pd_data_2, gdf_data_1 > gdf_data_2) assert_eq(pd_data_1 == pd_data_2, gdf_data_1 == gdf_data_2) assert_eq(pd_data_1 <= pd_data_2, gdf_data_1 <= gdf_data_2) assert_eq(pd_data_1 >= pd_data_2, gdf_data_1 >= gdf_data_2) @pytest.mark.parametrize( "lhs_dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) @pytest.mark.parametrize( "rhs_dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_datetime_series_binops_numpy(lhs_dtype, rhs_dtype): pd_data_1 = pd.Series( pd.date_range("20010101", "20020215", freq="400h", name="times") ) pd_data_2 = pd.Series( pd.date_range("20010101", "20020215", freq="401h", name="times") ) gdf_data_1 = Series(pd_data_1).astype(lhs_dtype) gdf_data_2 = Series(pd_data_2).astype(rhs_dtype) np_data_1 = np.array(pd_data_1).astype(lhs_dtype) np_data_2 = np.array(pd_data_2).astype(rhs_dtype) np.testing.assert_equal(np_data_1, gdf_data_1.to_numpy()) np.testing.assert_equal(np_data_2, gdf_data_2.to_numpy()) np.testing.assert_equal( np.less(np_data_1, np_data_2), (gdf_data_1 < gdf_data_2).to_numpy() ) 
np.testing.assert_equal( np.greater(np_data_1, np_data_2), (gdf_data_1 > gdf_data_2).to_numpy() ) np.testing.assert_equal( np.equal(np_data_1, np_data_2), (gdf_data_1 == gdf_data_2).to_numpy() ) np.testing.assert_equal( np.less_equal(np_data_1, np_data_2), (gdf_data_1 <= gdf_data_2).to_numpy(), ) np.testing.assert_equal( np.greater_equal(np_data_1, np_data_2), (gdf_data_1 >= gdf_data_2).to_numpy(), ) @pytest.mark.parametrize("data", [data1(), data2()]) def test_dt_ops(data): pd_data = pd.Series(data.copy()) gdf_data = Series(data.copy()) assert_eq(pd_data == pd_data, gdf_data == gdf_data) assert_eq(pd_data < pd_data, gdf_data < gdf_data) assert_eq(pd_data > pd_data, gdf_data > gdf_data) # libcudf doesn't respect timezones @pytest.mark.parametrize("data", [data1(), data2()]) @pytest.mark.parametrize("field", fields) def test_dt_series(data, field): pd_data = pd.Series(data.copy()) gdf_data = Series(pd_data) base = getattr(pd_data.dt, field) test = getattr(gdf_data.dt, field).to_pandas().astype("int64") assert_eq(base, test) @pytest.mark.parametrize("data", [data1(), data2()]) @pytest.mark.parametrize("field", fields) def test_dt_index(data, field): pd_data = data.copy() gdf_data = DatetimeIndex(pd_data) assert_eq(getattr(gdf_data, field), getattr(pd_data, field)) def test_setitem_datetime(): df = DataFrame() df["date"] = pd.date_range("20010101", "20010105").values assert np.issubdtype(df.date.dtype, np.datetime64) def test_sort_datetime(): df = pd.DataFrame() df["date"] = np.array( [ np.datetime64("2016-11-20"), np.datetime64("2020-11-20"), np.datetime64("2019-11-20"), np.datetime64("1918-11-20"), np.datetime64("2118-11-20"), ] ) df["vals"] = np.random.sample(len(df["date"])) gdf = cudf.from_pandas(df) s_df = df.sort_values(by="date") s_gdf = gdf.sort_values(by="date") assert_eq(s_df, s_gdf) def test_issue_165(): df_pandas = pd.DataFrame() start_date = datetime.datetime.strptime("2000-10-21", "%Y-%m-%d") data = [(start_date + datetime.timedelta(days=x)) for x in range(6)] df_pandas["dates"] = data df_pandas["num"] = [1, 2, 3, 4, 5, 6] df_cudf = DataFrame.from_pandas(df_pandas) base = df_pandas.query("dates==@start_date") test = df_cudf.query("dates==@start_date") assert_eq(base, test) assert len(test) > 0 mask = df_cudf.dates == start_date base_mask = df_pandas.dates == start_date assert_eq(mask, base_mask, check_names=False) assert mask.to_pandas().sum() > 0 start_date_ts = pd.Timestamp(start_date) test = df_cudf.query("dates==@start_date_ts") base = df_pandas.query("dates==@start_date_ts") assert_eq(base, test) assert len(test) > 0 mask = df_cudf.dates == start_date_ts base_mask = df_pandas.dates == start_date_ts assert_eq(mask, base_mask, check_names=False) assert mask.to_pandas().sum() > 0 start_date_np = np.datetime64(start_date_ts, "ns") test = df_cudf.query("dates==@start_date_np") base = df_pandas.query("dates==@start_date_np") assert_eq(base, test) assert len(test) > 0 mask = df_cudf.dates == start_date_np base_mask = df_pandas.dates == start_date_np assert_eq(mask, base_mask, check_names=False) assert mask.to_pandas().sum() > 0 @pytest.mark.parametrize("data", [data1(), data2()]) @pytest.mark.parametrize("dtype", NUMERIC_TYPES) def test_typecast_from_datetime(data, dtype): pd_data = pd.Series(data.copy()) np_data = np.array(pd_data) gdf_data = Series(pd_data) np_casted = np_data.astype(dtype) gdf_casted = gdf_data.astype(dtype) np.testing.assert_equal(np_casted, gdf_casted.to_numpy()) @pytest.mark.parametrize("data", [data1(), data2()]) @pytest.mark.parametrize( "dtype", 
["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_typecast_from_datetime_to_int64_to_datetime(data, dtype): pd_data = pd.Series(data.copy()) np_data = np.array(pd_data) gdf_data = Series(pd_data) np_casted = np_data.astype(np.int64).astype(dtype) gdf_casted = gdf_data.astype(np.int64).astype(dtype) np.testing.assert_equal(np_casted, gdf_casted.to_numpy()) @pytest.mark.parametrize("data", [timeseries_us_data()]) @pytest.mark.parametrize( "dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_typecast_to_different_datetime_resolutions(data, dtype): pd_data = pd.Series(data.copy()) np_data = np.array(pd_data).astype(dtype) gdf_series = Series(pd_data).astype(dtype) np.testing.assert_equal(np_data, gdf_series.to_numpy()) @pytest.mark.parametrize( "data", [timestamp_ms_data(), timestamp_us_data(), timestamp_ns_data()] ) @pytest.mark.parametrize( "dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_string_timstamp_typecast_to_different_datetime_resolutions( data, dtype ): pd_sr = data gdf_sr = cudf.Series.from_pandas(pd_sr) expect = pd_sr.values.astype(dtype) got = gdf_sr.astype(dtype).values_host np.testing.assert_equal(expect, got) @pytest.mark.parametrize("data", [numerical_data()]) @pytest.mark.parametrize("from_dtype", NUMERIC_TYPES) @pytest.mark.parametrize( "to_dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_typecast_to_datetime(data, from_dtype, to_dtype): np_data = data.astype(from_dtype) gdf_data = Series(np_data) np_casted = np_data.astype(to_dtype) gdf_casted = gdf_data.astype(to_dtype) np.testing.assert_equal(np_casted, gdf_casted.to_numpy()) @pytest.mark.parametrize("data", [numerical_data()]) @pytest.mark.parametrize("from_dtype", NUMERIC_TYPES) @pytest.mark.parametrize( "to_dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_typecast_to_from_datetime(data, from_dtype, to_dtype): np_data = data.astype(from_dtype) gdf_data = Series(np_data) np_casted = np_data.astype(to_dtype).astype(from_dtype) gdf_casted = gdf_data.astype(to_dtype).astype(from_dtype) np.testing.assert_equal(np_casted, gdf_casted.to_numpy()) @pytest.mark.parametrize("data", [numerical_data()]) @pytest.mark.parametrize( "from_dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) @pytest.mark.parametrize( "to_dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_typecast_from_datetime_to_datetime(data, from_dtype, to_dtype): np_data = data.astype(from_dtype) ser = Series(np_data) np_casted = np_data.astype(to_dtype) ser_casted = ser.astype(to_dtype) np.testing.assert_equal(np_casted, ser_casted.to_numpy()) @pytest.mark.parametrize("data", [numerical_data()]) @pytest.mark.parametrize("nulls", ["some", "all"]) def test_to_from_pandas_nulls(data, nulls): pd_data = pd.Series(data.copy().astype("datetime64[ns]")) if nulls == "some": # Fill half the values with NaT pd_data[list(range(0, len(pd_data), 2))] = np.datetime64("nat", "ns") elif nulls == "all": # Fill all the values with NaT pd_data[:] = np.datetime64("nat", "ns") gdf_data = Series.from_pandas(pd_data) expect = pd_data got = gdf_data.to_pandas() assert_eq(expect, got) @pytest.mark.parametrize( "dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_datetime_to_arrow(dtype): timestamp = ( cudf.datasets.timeseries( start="2000-01-01", end="2000-01-02", freq="3600s", 
dtypes={} ) .reset_index()["timestamp"] .reset_index(drop=True) ) gdf = DataFrame({"timestamp": timestamp.astype(dtype)}) assert_eq(gdf, DataFrame.from_arrow(gdf.to_arrow(preserve_index=False))) @pytest.mark.parametrize( "data", [ pd.Series([], dtype="datetime64[ns]"), pd.Series(pd.date_range("2010-01-01", "2010-02-01")), pd.Series([None, None], dtype="datetime64[ns]"), ], ) @pytest.mark.parametrize("nulls", ["none", "some"]) def test_datetime_unique(data, nulls): psr = data.copy() if len(data) > 0: if nulls == "some": p = np.random.randint(0, len(data), 2) psr[p] = None gsr = cudf.from_pandas(psr) expected = psr.unique() got = gsr.unique() # Unique does not provide a guarantee on ordering. assert_eq( pd.Series(expected).sort_values(ignore_index=True), got.sort_values(ignore_index=True).to_pandas(), ) @pytest.mark.parametrize( "data", [ pd.Series([], dtype="datetime64[ns]"), pd.Series(pd.date_range("2010-01-01", "2010-02-01")), pd.Series([None, None], dtype="datetime64[ns]"), ], ) @pytest.mark.parametrize("nulls", ["none", "some"]) def test_datetime_nunique(data, nulls): psr = data.copy() if len(data) > 0: if nulls == "some": p = np.random.randint(0, len(data), 2) psr[p] = None gsr = cudf.from_pandas(psr) expected = psr.nunique() got = gsr.nunique() assert_eq(got, expected) testdata = [ ( Series( ["2018-01-01", None, "2019-01-31", None, "2018-01-01"], dtype="datetime64[ms]", ), True, ), ( Series( [ "2018-01-01", "2018-01-02", "2019-01-31", "2018-03-01", "2018-01-01", ], dtype="datetime64[ms]", ), False, ), ( Series( np.array( ["2018-01-01", None, "2019-12-30"], dtype="datetime64[ms]" ) ), True, ), ] @pytest.mark.parametrize("data, expected", testdata) def test_datetime_has_null_test(data, expected): pd_data = data.to_pandas() count = pd_data.notna().value_counts() expected_count = 0 if False in count.keys(): expected_count = count[False] assert_eq(expected, data.has_nulls) assert_eq(expected_count, data.null_count) def test_datetime_has_null_test_pyarrow(): data = Series( pa.array( [0, np.iinfo("int64").min, np.iinfo("int64").max, None], type=pa.timestamp("ns"), ) ) expected = True expected_count = 1 assert_eq(expected, data.has_nulls) assert_eq(expected_count, data.null_count) def test_datetime_dataframe(): data = { "timearray": np.array( [0, 1, None, 2, 20, None, 897], dtype="datetime64[ms]" ) } gdf = cudf.DataFrame(data) pdf = pd.DataFrame(data) assert_eq(pdf, gdf) assert_eq(pdf.dropna(), gdf.dropna()) assert_eq(pdf.isnull(), gdf.isnull()) data = np.array([0, 1, None, 2, 20, None, 897], dtype="datetime64[ms]") gs = cudf.Series(data) ps = pd.Series(data) assert_eq(ps, gs) assert_eq(ps.dropna(), gs.dropna()) assert_eq(ps.isnull(), gs.isnull()) @pytest.mark.parametrize( "data", [ None, [], pd.Series([], dtype="float64"), pd.Index([]), pd.Series([1, 2, 3]), pd.Series([0, 1, -1]), pd.Series([0, 1, -1, 100.3, 200, 47637289]), pd.Series(["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"]), [1, 2, 3, 100, -123, -1, 0, 1000000000000679367], pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}), pd.DataFrame( {"year": ["2015", "2016"], "month": ["2", "3"], "day": [4, 5]} ), pd.DataFrame( { "year": [2015, 2016], "month": [2, 3], "day": [4, 5], "minute": [1, 100], "second": [90, 10], "hour": [1, 0.5], }, index=["a", "b"], ), pd.DataFrame( { "year": [], "month": [], "day": [], "minute": [], "second": [], "hour": [], }, ), ["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"], pd.Index([1, 2, 3, 4]), pd.DatetimeIndex( ["1970-01-01 00:00:00.000000001", "1970-01-01 
00:00:00.000000002"], dtype="datetime64[ns]", freq=None, ), pd.DatetimeIndex( [], dtype="datetime64[ns]", freq=None, ), pd.Series([1, 2, 3]).astype("datetime64[ns]"), pd.Series([1, 2, 3]).astype("datetime64[us]"), pd.Series([1, 2, 3]).astype("datetime64[ms]"), pd.Series([1, 2, 3]).astype("datetime64[s]"), pd.Series([1, 2, 3]).astype("datetime64[D]"), 1, 100, 17, 53.638435454, np.array([1, 10, 15, 478925, 2327623467]), np.array([0.3474673, -10, 15, 478925.34345, 2327623467]), ], ) @pytest.mark.parametrize("dayfirst", [True, False]) @pytest.mark.parametrize("infer_datetime_format", [True, False]) def test_cudf_to_datetime(data, dayfirst, infer_datetime_format): pd_data = data is_string_data = False if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)): gd_data = cudf.from_pandas(pd_data) is_string_data = ( gd_data.ndim == 1 and not gd_data.empty and gd_data.dtype.kind == "O" ) else: if type(pd_data).__module__ == np.__name__: gd_data = cp.array(pd_data) else: gd_data = pd_data is_string_data = isinstance(gd_data, list) and isinstance( next(iter(gd_data), None), str ) if dayfirst and not infer_datetime_format and is_string_data: # Note: pandas<2.0 also does not respect dayfirst=True correctly # for object data with pytest.raises(NotImplementedError): cudf.to_datetime( gd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format, ) else: expected = pd.to_datetime( pd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format, ) actual = cudf.to_datetime( gd_data, dayfirst=dayfirst, infer_datetime_format=infer_datetime_format, ) assert_eq(actual, expected) @pytest.mark.parametrize( "data", [ "2", ["1", "2", "3"], ["1/1/1", "2/2/2", "1"], pd.Series([1, 2, 3], dtype="timedelta64[ns]"), pd.DataFrame( { "year": [2015, 2016], "month": [2, 3], "day": [4, 5], "minute": [1, 100], "second": [90, 10], "hour": [1, 0], "blablacol": [1, 1], } ), pd.DataFrame( { "month": [2, 3], "day": [4, 5], "minute": [1, 100], "second": [90, 10], "hour": [1, 0], } ), ], ) def test_to_datetime_errors(data): pd_data = data if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)): gd_data = cudf.from_pandas(pd_data) else: gd_data = pd_data assert_exceptions_equal( pd.to_datetime, cudf.to_datetime, ([pd_data],), ([gd_data],), ) def test_to_datetime_not_implemented(): with pytest.raises(NotImplementedError): cudf.to_datetime([], exact=False) with pytest.raises(NotImplementedError): cudf.to_datetime([], origin="julian") with pytest.raises(NotImplementedError): cudf.to_datetime([], yearfirst=True) @pytest.mark.parametrize( "data", [ 1, [], pd.Series([], dtype="float64"), pd.Index([]), pd.Series([1, 2, 3]), pd.Series([1, 2.4, 3]), pd.Series([0, 1, -1]), pd.Series([0, 1, -1, 100, 200, 47637]), [10, 12, 1200, 15003], pd.DatetimeIndex( [], dtype="datetime64[ns]", freq=None, ), pd.Index([1, 2, 3, 4]), ], ) @pytest.mark.parametrize("unit", ["D", "s", "ms", "us", "ns"]) def test_to_datetime_units(data, unit): pd_data = data if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)): gd_data = cudf.from_pandas(pd_data) else: gd_data = pd_data expected = pd.to_datetime(pd_data, unit=unit) actual = cudf.to_datetime(gd_data, unit=unit) assert_eq(actual, expected) @pytest.mark.parametrize( "data,format", [ ("2012-10-11", None), ("2012-10-11", "%Y-%m-%d"), ("2012-10-11", "%Y-%d-%m"), (["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"], None), (["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"], "%Y-%m-%d"), (["2012-10-11", "2010-01-01", "2016-07-07", "2014-02-02"], "%Y-%d-%m"), (["10-11-2012", 
"01-01-2010", "07-07-2016", "02-02-2014"], "%m-%d-%Y"), (["10-11-2012", "01-01-2010", "07-07-2016", "02-02-2014"], "%d-%m-%Y"), (["10-11-2012", "01-01-2010", "07-07-2016", "02-02-2014"], None), (["2012/10/11", "2010/01/01", "2016/07/07", "2014/02/02"], None), (["2012/10/11", "2010/01/01", "2016/07/07", "2014/02/02"], "%Y/%m/%d"), (["2012/10/11", "2010/01/01", "2016/07/07", "2014/02/02"], "%Y/%d/%m"), (["10/11/2012", "01/01/2010", "07/07/2016", "02/02/2014"], "%m/%d/%Y"), (["10/11/2012", "01/01/2010", "07/07/2016", "02/02/2014"], "%d/%m/%Y"), (["10/11/2012", "01/01/2010", "07/07/2016", "02/02/2014"], None), (["2021-04-13 12:30:04.123456789"], "%Y-%m-%d %H:%M:%S.%f"), (pd.Series([2015, 2020, 2021]), "%Y"), pytest.param( pd.Series(["1", "2", "1"]), "%m", marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/6109" "https://github.com/pandas-dev/pandas/issues/35934" ), ), pytest.param( pd.Series(["14", "20", "10"]), "%d", marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/6109" "https://github.com/pandas-dev/pandas/issues/35934" ), ), (pd.Series([2015, 2020.0, 2021.2]), "%Y"), ], ) @pytest.mark.parametrize("infer_datetime_format", [True, False]) def test_to_datetime_format(data, format, infer_datetime_format): pd_data = data if isinstance(pd_data, (pd.Series, pd.DataFrame, pd.Index)): gd_data = cudf.from_pandas(pd_data) else: gd_data = pd_data expected = pd.to_datetime( pd_data, format=format, infer_datetime_format=infer_datetime_format ) actual = cudf.to_datetime( gd_data, format=format, infer_datetime_format=infer_datetime_format ) assert_eq(actual, expected) def test_to_datetime_data_out_of_range_for_format(): with pytest.raises(ValueError): cudf.to_datetime("2015-02-99", format="%Y-%m-%d") def test_to_datetime_different_formats_notimplemented(): with pytest.raises(NotImplementedError): cudf.to_datetime(["2015-02-01", "2015-02-01 10:10:10"]) def test_datetime_can_cast_safely(): sr = cudf.Series( ["1679-01-01", "2000-01-31", "2261-01-01"], dtype="datetime64[ms]" ) assert sr._column.can_cast_safely(np.dtype("datetime64[ns]")) sr = cudf.Series( ["1677-01-01", "2000-01-31", "2263-01-01"], dtype="datetime64[ms]" ) assert sr._column.can_cast_safely(np.dtype("datetime64[ns]")) is False # Cudf autocasts unsupported time_units @pytest.mark.parametrize( "dtype", ["datetime64[D]", "datetime64[W]", "datetime64[M]", "datetime64[Y]"], ) def test_datetime_array_timeunit_cast(dtype): testdata = np.array( [ np.datetime64("2016-11-20"), np.datetime64("2020-11-20"), np.datetime64("2019-11-20"), np.datetime64("1918-11-20"), np.datetime64("2118-11-20"), ], dtype=dtype, ) gs = Series(testdata) ps = pd.Series(testdata) assert_eq(ps, gs) gdf = DataFrame() gdf["a"] = np.arange(5) gdf["b"] = testdata pdf = pd.DataFrame() pdf["a"] = np.arange(5) pdf["b"] = testdata assert_eq(pdf, gdf) @pytest.mark.parametrize("timeunit", ["D", "W", "M", "Y"]) def test_datetime_scalar_timeunit_cast(timeunit): testscalar = np.datetime64("2016-11-20", timeunit) gs = Series(testscalar) ps = pd.Series(testscalar) assert_eq(ps, gs) gdf = DataFrame() gdf["a"] = np.arange(5) gdf["b"] = testscalar pdf = pd.DataFrame() pdf["a"] = np.arange(5) pdf["b"] = testscalar assert_eq(pdf, gdf) @pytest.mark.parametrize( "data", [ ["2001-01-01", "2002-02-02", "2000-01-05", "NaT"], ["2001-01-01", "2002-02-02", "2000-01-05", None], [None, None, None, None, None], ], ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) def test_str_null_to_datetime(data, dtype): psr = pd.Series(data) gsr = Series(data) 
assert_eq(psr.astype(dtype), gsr.astype(dtype)) def test_str_to_datetime_error(): psr = pd.Series(["2001-01-01", "2002-02-02", "2000-01-05", "None"]) gsr = Series(["2001-01-01", "2002-02-02", "2000-01-05", "None"]) assert_exceptions_equal( lfunc=psr.astype, rfunc=gsr.astype, lfunc_args_and_kwargs=(["datetime64[s]"],), rfunc_args_and_kwargs=(["datetime64[s]"],), check_exception_type=False, ) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4, 10, 100, 20000], [None] * 7, [10, 20, 30, None, 100, 200, None], [3223.234, 342.2332, 23423.23, 3343.23324, 23432.2323, 242.23, 233], ], ) @pytest.mark.parametrize( "other", [ [1, 2, 3, 4, 10, 100, 20000], [None] * 7, [10, 20, 30, None, 100, 200, None], [3223.234, 342.2332, 23423.23, 3343.23324, 23432.2323, 242.23, 233], np.datetime64("2005-02"), np.datetime64("2005-02-25"), np.datetime64("2005-02-25T03:30"), np.datetime64("nat"), ], ) @pytest.mark.parametrize("data_dtype", DATETIME_TYPES) @pytest.mark.parametrize("other_dtype", DATETIME_TYPES) def test_datetime_subtract(data, other, data_dtype, other_dtype): gsr = cudf.Series(data, dtype=data_dtype) psr = gsr.to_pandas() if isinstance(other, np.datetime64): gsr_other = other psr_other = other else: gsr_other = cudf.Series(other, dtype=other_dtype) psr_other = gsr_other.to_pandas() expected = psr - psr_other actual = gsr - gsr_other assert_eq(expected, actual) expected = psr_other - psr actual = gsr_other - gsr assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), [1000000, 200000, 3000000], [1000000, 200000, None], [1], [12, 11, 232, 223432411, 2343241, 234324, 23234], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ], ) @pytest.mark.parametrize( "other_scalars", [ datetime.timedelta(days=768), datetime.timedelta(seconds=768), datetime.timedelta(microseconds=7), datetime.timedelta(minutes=447), datetime.timedelta(hours=447), datetime.timedelta(weeks=734), np.timedelta64(4, "s"), np.timedelta64(456, "D"), np.timedelta64(46, "h"), np.timedelta64("nat"), np.timedelta64(1, "s"), np.timedelta64(1, "ms"), np.timedelta64(1, "us"), np.timedelta64(1, "ns"), ], ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) @pytest.mark.parametrize( "op", ["add", "sub"], ) def test_datetime_series_ops_with_scalars(data, other_scalars, dtype, op): gsr = cudf.Series(data=data, dtype=dtype) psr = gsr.to_pandas() if op == "add": expected = psr + other_scalars actual = gsr + other_scalars elif op == "sub": expected = psr - other_scalars actual = gsr - other_scalars assert_eq(expected, actual) if op == "add": expected = other_scalars + psr actual = other_scalars + gsr assert_eq(expected, actual) elif op == "sub": assert_exceptions_equal( lfunc=operator.sub, rfunc=operator.sub, lfunc_args_and_kwargs=([other_scalars, psr],), rfunc_args_and_kwargs=([other_scalars, gsr],), ) @pytest.mark.parametrize("data", ["20110101", "20120101", "20130101"]) @pytest.mark.parametrize("other_scalars", ["20110101", "20120101", "20130101"]) @pytest.mark.parametrize("op", _cmpops) @pytest.mark.parametrize( "dtype", ["datetime64[ns]", "datetime64[us]", "datetime64[ms]", "datetime64[s]"], ) def test_datetime_series_cmpops_with_scalars(data, other_scalars, dtype, op): gsr = cudf.Series(data=data, dtype=dtype) psr = gsr.to_pandas() 
expect = op(psr, other_scalars) got = op(gsr, other_scalars) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [1000000, 200000, 3000000], [1000000, 200000, None], [], [None], [None, None, None, None, None], [12, 12, 22, 343, 4353534, 435342], np.array([10, 20, 30, None, 100]), cp.asarray([10, 20, 30, 100]), [1000000, 200000, 3000000], [1000000, 200000, None], [1], [12, 11, 232, 223432411, 2343241, 234324, 23234], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], [1.321, 1132.324, 23223231.11, 233.41, 0.2434, 332, 323], [12, 11, 2.32, 2234.32411, 2343.241, 23432.4, 23234], ], ) @pytest.mark.parametrize( "scalar", [ datetime.timedelta(days=768), datetime.timedelta(seconds=768), datetime.timedelta(microseconds=7), pytest.param(np.timedelta64("nat"), marks=pytest.mark.xfail), np.timedelta64(1, "s"), np.timedelta64(1, "ms"), np.timedelta64(1, "us"), np.timedelta64(1, "ns"), ], ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) @pytest.mark.parametrize("op", [np.add, np.subtract]) def test_datetime_series_ops_with_cudf_scalars(data, scalar, dtype, op): gsr = cudf.Series(data=data, dtype=dtype) psr = gsr.to_pandas() expect = op(psr, scalar) got = op(gsr, cudf.Scalar(scalar)) assert_eq(expect, got) def test_datetime_invalid_ops(): sr = cudf.Series([1, 2, 3], dtype="datetime64[ns]") psr = sr.to_pandas() assert_exceptions_equal( lfunc=operator.add, rfunc=operator.add, lfunc_args_and_kwargs=([psr, pd.Timestamp(1513393355.5, unit="s")],), rfunc_args_and_kwargs=([sr, pd.Timestamp(1513393355.5, unit="s")],), ) assert_exceptions_equal( lfunc=operator.truediv, rfunc=operator.truediv, lfunc_args_and_kwargs=([psr, pd.Timestamp(1513393355.5, unit="s")],), rfunc_args_and_kwargs=([sr, pd.Timestamp(1513393355.5, unit="s")],), ) assert_exceptions_equal( lfunc=operator.add, rfunc=operator.add, lfunc_args_and_kwargs=([psr, psr],), rfunc_args_and_kwargs=([sr, sr],), ) assert_exceptions_equal( lfunc=operator.floordiv, rfunc=operator.floordiv, lfunc_args_and_kwargs=([psr, psr],), rfunc_args_and_kwargs=([sr, sr],), ) assert_exceptions_equal( lfunc=operator.floordiv, rfunc=operator.floordiv, lfunc_args_and_kwargs=([psr, pd.Timestamp(1513393355.5, unit="s")],), rfunc_args_and_kwargs=([sr, pd.Timestamp(1513393355.5, unit="s")],), ) assert_exceptions_equal( lfunc=operator.add, rfunc=operator.add, lfunc_args_and_kwargs=([psr, 1],), rfunc_args_and_kwargs=([sr, 1],), ) assert_exceptions_equal( lfunc=operator.truediv, rfunc=operator.truediv, lfunc_args_and_kwargs=([psr, "a"],), rfunc_args_and_kwargs=([sr, "a"],), ) assert_exceptions_equal( lfunc=operator.mul, rfunc=operator.mul, lfunc_args_and_kwargs=([psr, 1],), rfunc_args_and_kwargs=([sr, 1],), ) @pytest.mark.parametrize( "data", [ [], [1, 2, 3], [None, 1, 10, 11, None], [None, None, None, None, None], [None], ], ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) @pytest.mark.parametrize( "fill_value", [ np.datetime64("2005-02"), np.datetime64("2005-02-25"), np.datetime64("2005-02-25T03:30"), np.datetime64("nat"), "NaT", ], ) def test_datetime_fillna(data, dtype, fill_value): sr = cudf.Series(data, dtype=dtype) psr = sr.to_pandas() expected = psr.dropna() actual = sr.dropna() assert_eq(expected, actual) expected = psr.fillna(fill_value) actual = sr.fillna(fill_value) assert_eq(expected, actual) expected = expected.dropna() actual = actual.dropna() assert_eq(expected, actual) @pytest.mark.parametrize( "data", [[1, 2, 3, None], [], [100121, 1221312, 321312321, 1232131223]] ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) @pytest.mark.parametrize( 
"date_format", [ "%d - %m", "%y/%H", "%Y", "%I - %M / %S", "%f", "%j", "%p", "%w", "%U", "%W", "%G", "%u", "%V", "%b", "%B", "%a", "%A", ], ) def test_datetime_strftime(data, dtype, date_format): gsr = cudf.Series(data, dtype=dtype) psr = gsr.to_pandas() expected = psr.dt.strftime(date_format=date_format) actual = gsr.dt.strftime(date_format=date_format) assert_eq(expected, actual) @pytest.mark.parametrize("date_format", ["%c", "%x", "%X"]) def test_datetime_strftime_not_implemented_formats(date_format): gsr = cudf.Series([1, 2, 3], dtype="datetime64[ms]") with pytest.raises(NotImplementedError): gsr.dt.strftime(date_format=date_format) @pytest.mark.parametrize("data", [[1, 2, 3], [], [1, 20, 1000, None]]) @pytest.mark.parametrize("dtype", DATETIME_TYPES) @pytest.mark.parametrize("stat", ["mean", "quantile"]) def test_datetime_stats(data, dtype, stat): gsr = cudf.Series(data, dtype=dtype) psr = gsr.to_pandas() expected = getattr(psr, stat)() actual = getattr(gsr, stat)() if len(data) == 0: assert np.isnat(expected.to_numpy()) and np.isnat(actual.to_numpy()) else: assert_eq(expected, actual) @pytest.mark.parametrize("op", ["max", "min", "std", "median"]) @pytest.mark.parametrize( "data", [ [], [1, 2, 3, 100], [10, None, 100, None, None], [None, None, None], [1231], ], ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) def test_datetime_reductions(data, op, dtype): sr = cudf.Series(data, dtype=dtype) psr = sr.to_pandas() actual = getattr(sr, op)() with expect_warning_if( psr.size > 0 and psr.isnull().all() and op == "median", RuntimeWarning ): expected = getattr(psr, op)() if ( expected is pd.NaT and actual is pd.NaT or (np.isnat(expected.to_numpy()) and np.isnat(actual)) ): assert True else: assert_eq(expected, actual) @pytest.mark.parametrize("timezone", ["naive", "UTC"]) @pytest.mark.parametrize( "data", [ np.arange("2002-10-27T04:30", 4 * 60, 60, dtype="M8[m]"), np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[m]"), np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[ns]"), np.arange("2002-10-27T04:30", 10 * 60, 1, dtype="M8[us]"), np.arange("2002-10-27T04:30", 4 * 60, 60, dtype="M8[s]"), ], ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) def test_datetime_infer_format(data, timezone, dtype): ts_data = np.datetime_as_string(data, timezone=timezone) sr = cudf.Series(ts_data) if timezone == "naive": psr = pd.Series(ts_data) expected = psr.astype(dtype) actual = sr.astype(dtype) assert_eq(expected, actual) else: with pytest.raises(NotImplementedError): sr.astype(dtype) def test_dateoffset_instance_subclass_check(): assert not issubclass(pd.DateOffset, cudf.DateOffset) assert not isinstance(pd.DateOffset(), cudf.DateOffset) def test_datetime_to_datetime_error(): assert_exceptions_equal( lfunc=pd.to_datetime, rfunc=cudf.to_datetime, lfunc_args_and_kwargs=(["02-Oct-2017 09:30", "%d-%B-%Y %H:%M"],), rfunc_args_and_kwargs=(["02-Oct-2017 09:30", "%d-%B-%Y %H:%M"],), check_exception_type=False, ) def test_is_leap_year(): data = [ "2020-05-31 08:00:00", None, "1999-12-31 18:40:00", "2000-12-31 04:00:00", None, "1900-02-28 07:00:00", "1800-03-14 07:30:00", "2100-03-14 07:30:00", "1970-01-01 00:00:00", "1969-12-31 12:59:00", ] # Series ps = pd.Series(data, dtype="datetime64[s]") gs = cudf.from_pandas(ps) expect = ps.dt.is_leap_year got = gs.dt.is_leap_year assert_eq(expect, got) # DatetimeIndex pIndex = pd.DatetimeIndex(data) gIndex = cudf.from_pandas(pIndex) expect2 = pIndex.is_leap_year got2 = gIndex.is_leap_year assert_eq(expect2, got2) def test_quarter(): data = [ "2020-05-31 08:00:00", 
"1999-12-31 18:40:00", "2000-12-31 04:00:00", "1900-02-28 07:00:00", "1800-03-14 07:30:00", "2100-03-14 07:30:00", "1970-01-01 00:00:00", "1969-12-31 12:59:00", ] dtype = "datetime64[s]" # Series ps = pd.Series(data, dtype=dtype) gs = cudf.from_pandas(ps) expect = ps.dt.quarter got = gs.dt.quarter assert_eq(expect, got, check_dtype=False) # DatetimeIndex pIndex = pd.DatetimeIndex(data) gIndex = cudf.from_pandas(pIndex) expect2 = pIndex.quarter got2 = gIndex.quarter assert_eq(expect2.values, got2.values) @pytest.mark.parametrize( "data", [ pd.Series([], dtype="datetime64[ns]"), pd.Series(pd.date_range("2010-01-01", "2010-02-01")), pd.Series([None, None], dtype="datetime64[ns]"), pd.Series("2020-05-31 08:00:00", dtype="datetime64[s]"), pd.Series( pd.date_range(start="2021-07-25", end="2021-07-30"), index=["a", "b", "c", "d", "e", "f"], ), ], ) def test_isocalendar_series(data): ps = data.copy() gs = cudf.from_pandas(ps) expect = ps.dt.isocalendar() got = gs.dt.isocalendar() assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize( "data", [ pd.DatetimeIndex([], dtype="datetime64[ns]"), pd.DatetimeIndex([None, None], dtype="datetime64[ns]"), pd.DatetimeIndex( [ "2020-05-31 08:00:00", "1999-12-31 18:40:00", "2000-12-31 04:00:00", ], dtype="datetime64[ns]", ), pd.DatetimeIndex(["2100-03-14 07:30:00"], dtype="datetime64[ns]"), ], ) def test_isocalendar_index(data): ps = data.copy() gs = cudf.from_pandas(ps) expect = ps.isocalendar() got = gs.isocalendar() assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize("dtype", DATETIME_TYPES) def test_days_in_months(dtype): nrows = 1000 data = dataset_generator.rand_dataframe( dtypes_meta=[ {"dtype": dtype, "null_frequency": 0.4, "cardinality": nrows} ], rows=nrows, use_threads=False, seed=23, ) ps = data.to_pandas()["0"] gs = cudf.from_pandas(ps) assert_eq(ps.dt.days_in_month, gs.dt.days_in_month) @pytest.mark.parametrize( "data", [ [ "2020-05-31", None, "1999-12-01", "2000-12-21", None, "1900-02-28", "1800-03-14", "2100-03-10", "1970-01-01", "1969-12-11", ] ], ) @pytest.mark.parametrize("dtype", ["datetime64[ns]"]) def test_is_month_start(data, dtype): # Series ps = pd.Series(data, dtype=dtype) gs = cudf.from_pandas(ps) expect = ps.dt.is_month_start got = gs.dt.is_month_start assert_eq(expect, got) ################################################################## # Date Range Tests # ################################################################## date_range_test_dates_start = [ "2000-02-13 08:41:06", # leap year "1996-11-21 04:05:30", # non leap year "1970-01-01 00:00:00", # unix epoch time 0 "1831-05-08 15:23:21", ] date_range_test_dates_end = [ "2000-02-13 08:41:06", # leap year "1996-11-21 04:05:30", # non leap year "1970-01-01 00:00:00", # unix epoch time 0 "1831-05-08 15:23:21", ] date_range_test_periods = [1, 10, 100] date_range_test_freq = [ {"months": 3, "years": 1}, pytest.param( {"hours": 10, "days": 57, "nanoseconds": 3}, marks=pytest.mark.xfail( condition=PANDAS_LT_140, reason="Pandas ignoring nanoseconds component. " "https://github.com/pandas-dev/pandas/issues/44393", ), ), "83D", "17h", "-680T", "110546s", pytest.param( "110546789L", marks=pytest.mark.xfail( condition=not PANDAS_GE_150, reason="Pandas DateOffset ignores milliseconds. 
" "https://github.com/pandas-dev/pandas/issues/43371", ), ), "110546789248U", ] @pytest.fixture(params=date_range_test_dates_start[:]) def start(request): return request.param @pytest.fixture(params=date_range_test_dates_end[:]) def end(request): return request.param @pytest.fixture(params=date_range_test_periods[:]) def periods(request): return request.param @pytest.fixture(params=date_range_test_freq[:]) def freq(request): return request.param def test_date_range_start_end_periods(start, end, periods): expect = pd.date_range(start=start, end=end, periods=periods, name="a") got = cudf.date_range(start=start, end=end, periods=periods, name="a") np.testing.assert_allclose( expect.to_numpy().astype("int64"), got.to_pandas().to_numpy().astype("int64"), ) def test_date_range_start_end_freq(request, start, end, freq): request.applymarker( pytest.mark.xfail( condition=( start == "1831-05-08 15:23:21" and end == "1996-11-21 04:05:30" and freq == "110546789L" ), reason="https://github.com/rapidsai/cudf/issues/12133", ) ) if isinstance(freq, str): _gfreq = _pfreq = freq else: _gfreq = cudf.DateOffset(**freq) _pfreq = pd.DateOffset(**freq) expect = pd.date_range(start=start, end=end, freq=_pfreq, name="a") got = cudf.date_range(start=start, end=end, freq=_gfreq, name="a") np.testing.assert_allclose( expect.to_numpy().astype("int64"), got.to_pandas().to_numpy().astype("int64"), ) def test_date_range_start_freq_periods(start, freq, periods): if isinstance(freq, str): _gfreq = _pfreq = freq else: _gfreq = cudf.DateOffset(**freq) _pfreq = pd.DateOffset(**freq) expect = pd.date_range(start=start, periods=periods, freq=_pfreq, name="a") got = cudf.date_range(start=start, periods=periods, freq=_gfreq, name="a") np.testing.assert_allclose( expect.to_numpy().astype("int64"), got.to_pandas().to_numpy().astype("int64"), ) def test_date_range_end_freq_periods(request, end, freq, periods): request.applymarker( pytest.mark.xfail( condition=( "nanoseconds" in freq and periods != 1 and end == "1970-01-01 00:00:00" ), reason="https://github.com/pandas-dev/pandas/issues/46877", ) ) if isinstance(freq, str): _gfreq = _pfreq = freq else: _gfreq = cudf.DateOffset(**freq) _pfreq = pd.DateOffset(**freq) expect = pd.date_range(end=end, periods=periods, freq=_pfreq, name="a") got = cudf.date_range(end=end, periods=periods, freq=_gfreq, name="a") np.testing.assert_allclose( expect.to_numpy().astype("int64"), got.to_pandas().to_numpy().astype("int64"), ) def test_date_range_freq_does_not_divide_range(): expect = pd.date_range( "2001-01-01 00:00:00.000000", "2001-01-01 00:00:00.000010", freq="3us" ) got = cudf.date_range( "2001-01-01 00:00:00.000000", "2001-01-01 00:00:00.000010", freq="3us" ) np.testing.assert_allclose( expect.to_numpy().astype("int64"), got.to_pandas().to_numpy().astype("int64"), ) def test_date_range_raise_overflow(): # Fixed offset start = np.datetime64(np.iinfo("int64").max, "ns") periods = 2 freq = cudf.DateOffset(nanoseconds=1) with pytest.raises(pd._libs.tslibs.np_datetime.OutOfBoundsDatetime): cudf.date_range(start=start, periods=periods, freq=freq) # Non-fixed offset start = np.datetime64(np.iinfo("int64").max, "ns") periods = 2 freq = cudf.DateOffset(months=1) with pytest.raises(pd._libs.tslibs.np_datetime.OutOfBoundsDatetime): # Extending beyond the max value will trigger a warning when pandas # does an internal conversion to a Python built-in datetime.datetime # object, which only supports down to microsecond resolution. 
with pytest.warns(UserWarning): cudf.date_range(start=start, periods=periods, freq=freq) @pytest.mark.parametrize( "freqstr_unsupported", [ "1M", "2SM", "3MS", "4BM", "5CBM", "6SMS", "7BMS", "8CBMS", "Q", "2BQ", "3BQS", "10A", "10Y", "9BA", "9BY", "8AS", "8YS", "7BAS", "7BYS", "BH", "B", ], ) def test_date_range_raise_unsupported(freqstr_unsupported): s, e = "2001-01-01", "2008-01-31" pd.date_range(start=s, end=e, freq=freqstr_unsupported) with pytest.raises(ValueError, match="does not yet support"): cudf.date_range(start=s, end=e, freq=freqstr_unsupported) # We also check that these values are unsupported when using lowercase # characters. We exclude the value 3MS (every 3 month starts) because 3ms # is a valid frequency for every 3 milliseconds. if freqstr_unsupported != "3MS": freqstr_unsupported = freqstr_unsupported.lower() pd.date_range(start=s, end=e, freq=freqstr_unsupported) with pytest.raises(ValueError, match="does not yet support"): cudf.date_range(start=s, end=e, freq=freqstr_unsupported) ################################################################## # End of Date Range Test # ################################################################## @pytest.mark.parametrize( "data", [ [ "2020-05-31", "2020-02-29", None, "1999-12-01", "2000-12-21", None, "1900-02-28", "1800-03-14", "2100-03-10", "1970-01-01", "1969-12-11", ] ], ) @pytest.mark.parametrize("dtype", ["datetime64[ns]"]) def test_is_month_end(data, dtype): # Series ps = pd.Series(data, dtype=dtype) gs = cudf.from_pandas(ps) expect = ps.dt.is_month_end got = gs.dt.is_month_end assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [ "2020-05-31", None, "1999-12-01", "2000-12-21", None, "1900-01-01", "1800-03-14", "2100-03-10", "1970-01-01", "1969-12-11", "2017-12-30", "2017-12-31", "2018-01-01", ] ], ) @pytest.mark.parametrize("dtype", ["datetime64[ns]"]) def test_is_year_start(data, dtype): ps = pd.Series(data, dtype=dtype) gs = cudf.from_pandas(ps) expect = ps.dt.is_year_start got = gs.dt.is_year_start assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [ "2020-05-31", None, "1999-12-01", "2000-12-21", None, "1900-12-31", "1800-03-14", "2017-12-30", "2017-12-31", "2020-12-31 08:00:00", None, "1999-12-31 18:40:00", "2000-12-31 04:00:00", None, "1800-12-14 07:30:00", "2100-12-14 07:30:00", "2020-05-31", ] ], ) @pytest.mark.parametrize("dtype", ["datetime64[ns]"]) def test_is_year_end(data, dtype): ps = pd.Series(data, dtype=dtype) gs = cudf.from_pandas(ps) expect = ps.dt.is_year_end got = gs.dt.is_year_end assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [ "2020-05-01", "2020-05-31", "2020-02-29", None, "1999-12-01", "2000-12-21", None, "1900-02-28", "1800-03-14", "2100-03-10", "1970-04-1", "1970-01-01", "1969-12-11", "2020-12-31", ] ], ) @pytest.mark.parametrize("dtype", ["datetime64[ns]"]) def test_is_quarter_start(data, dtype): # Series ps = pd.Series(data, dtype=dtype) gs = cudf.from_pandas(ps) expect = ps.dt.is_quarter_start got = gs.dt.is_quarter_start assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [ "2020-05-01", "2020-05-31", "2020-02-29", None, "1999-12-01", "2000-12-21", None, "1900-02-28", "1800-03-14", "2100-03-10", "1970-04-1", "1970-01-01", "1969-12-11", "2020-12-31", ] ], ) @pytest.mark.parametrize("dtype", ["datetime64[ns]"]) def test_is_quarter_end(data, dtype): # Series ps = pd.Series(data, dtype=dtype) gs = cudf.from_pandas(ps) expect = ps.dt.is_quarter_end got = gs.dt.is_quarter_end assert_eq(expect, got) def test_error_values(): s = cudf.Series([1, 2, 3], 
dtype="datetime64[ns]") with pytest.raises( NotImplementedError, match="DateTime Arrays is not yet implemented in cudf", ): s.values @pytest.mark.parametrize( "data", [ ( [ "2020-05-31 08:00:00", "1999-12-31 18:40:10", "2000-12-31 04:00:05", "1900-02-28 07:00:06", "1800-03-14 07:30:20", "2100-03-14 07:30:20", "1970-01-01 00:00:09", "1969-12-31 12:59:10", ] ) ], ) @pytest.mark.parametrize("time_type", DATETIME_TYPES) @pytest.mark.parametrize( "resolution", ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"] ) def test_ceil(data, time_type, resolution): gs = cudf.Series(data, dtype=time_type) ps = gs.to_pandas() expect = ps.dt.ceil(resolution) got = gs.dt.ceil(resolution) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ ( [ "2020-05-31 08:00:00", "1999-12-31 18:40:10", "2000-12-31 04:00:05", "1900-02-28 07:00:06", "1800-03-14 07:30:20", "2100-03-14 07:30:20", "1970-01-01 00:00:09", "1969-12-31 12:59:10", ] ) ], ) @pytest.mark.parametrize("time_type", DATETIME_TYPES) @pytest.mark.parametrize( "resolution", ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"] ) def test_floor(data, time_type, resolution): gs = cudf.Series(data, dtype=time_type) ps = gs.to_pandas() expect = ps.dt.floor(resolution) got = gs.dt.floor(resolution) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ ( [ "2020-05-31 08:00:00", "1999-12-31 18:40:10", "2000-12-31 04:00:05", "1900-02-28 07:00:06", "1800-03-14 07:30:20", "2100-03-14 07:30:20", "1970-01-01 00:00:09", "1969-12-31 12:59:10", ] ) ], ) @pytest.mark.parametrize("time_type", DATETIME_TYPES) @pytest.mark.parametrize( "resolution", ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"] ) def test_round(data, time_type, resolution): gs = cudf.Series(data, dtype=time_type) ps = gs.to_pandas() expect = ps.dt.round(resolution) got = gs.dt.round(resolution) assert_eq(expect, got) @pytest.mark.parametrize( "idx", [ pd.DatetimeIndex([]), pd.DatetimeIndex(["2010-05-31"]), pd.date_range("2000-01-01", "2000-12-31", periods=21), ], ) @pytest.mark.parametrize( "offset", [ "10Y", "6M", "M", "31D", "0H", "44640T", "44640min", "2678000S", "2678000000L", "2678000000ms", "2678000000000U", "2678000000000us", "2678000000000000N", "2678000000000000ns", ], ) def test_first(idx, offset): p = pd.Series(range(len(idx)), dtype="int64", index=idx) g = cudf.from_pandas(p) expect = p.first(offset=offset) got = g.first(offset=offset) assert_eq(expect, got) @pytest.mark.parametrize( # This test case tests correctness when start is end of month "idx, offset", [ ( pd.DatetimeIndex( [ "2020-01-31", "2020-02-15", "2020-02-29", "2020-03-15", "2020-03-31", "2020-04-15", "2020-04-30", ] ), "3M", ) ], ) def test_first_start_at_end_of_month(idx, offset): p = pd.Series(range(len(idx)), index=idx) g = cudf.from_pandas(p) expect = p.first(offset=offset) got = g.first(offset=offset) assert_eq(expect, got) @pytest.mark.parametrize( "idx", [ pd.DatetimeIndex([]), pd.DatetimeIndex(["2010-05-31"]), pd.date_range("2000-01-01", "2000-12-31", periods=21), ], ) @pytest.mark.parametrize( "offset", [ "10Y", "6M", "M", "31D", "0H", "44640T", "44640min", "2678000S", "2678000000L", "2678000000ms", "2678000000000U", "2678000000000us", "2678000000000000N", "2678000000000000ns", ], ) def test_last(idx, offset): p = pd.Series(range(len(idx)), dtype="int64", index=idx) g = cudf.from_pandas(p) expect = p.last(offset=offset) got = g.last(offset=offset) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [ "2020-01-31", "2020-02-15", "2020-02-29", "2020-03-15", "2020-03-31", "2020-04-15", 
"2020-04-30", ], [43534, 43543, 37897, 2000], ], ) @pytest.mark.parametrize("dtype", [None, "datetime64[ns]"]) def test_datetime_constructor(data, dtype): expected = pd.DatetimeIndex(data=data, dtype=dtype) actual = cudf.DatetimeIndex(data=data, dtype=dtype) assert_eq(expected, actual) expected = pd.DatetimeIndex(data=pd.Series(data), dtype=dtype) actual = cudf.DatetimeIndex(data=cudf.Series(data), dtype=dtype) assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [pd.Timestamp("2001-01-01", tz="America/New_York")], pd.Series(["2001-01-01"], dtype="datetime64[ns, America/New_York]"), pd.Index(["2001-01-01"], dtype="datetime64[ns, America/New_York]"), ], ) def test_construction_from_tz_timestamps(data): with pytest.raises(NotImplementedError): _ = cudf.Series(data) with pytest.raises(NotImplementedError): _ = cudf.Index(data) with pytest.raises(NotImplementedError): _ = cudf.DatetimeIndex(data) with pytest.raises(NotImplementedError): cudf.CategoricalIndex(data) @pytest.mark.parametrize("op", _cmpops) def test_datetime_binop_tz_timestamp(op): s = cudf.Series([1, 2, 3], dtype="datetime64[ns]") pd_tz_timestamp = pd.Timestamp("1970-01-01 00:00:00.000000001", tz="utc") with pytest.raises(NotImplementedError): op(s, pd_tz_timestamp) date_scalar = datetime.datetime.now(datetime.timezone.utc) with pytest.raises(NotImplementedError): op(s, date_scalar) @pytest.mark.parametrize( "data1", [["20110101", "20120101", None, "20140101", None]] ) @pytest.mark.parametrize( "data2", [["20110101", "20120101", "20130101", None, None]] ) @pytest.mark.parametrize("op", _cmpops) def test_datetime_series_cmpops_pandas_compatibility(data1, data2, op): gsr1 = cudf.Series(data=data1, dtype="datetime64[ns]") psr1 = gsr1.to_pandas() gsr2 = cudf.Series(data=data2, dtype="datetime64[ns]") psr2 = gsr2.to_pandas() expect = op(psr1, psr2) with cudf.option_context("mode.pandas_compatible", True): got = op(gsr1, gsr2) assert_eq(expect, got) def test_datetime_getitem_na(): s = cudf.Series([1, 2, None, 3], dtype="datetime64[ns]") assert s[2] is cudf.NaT def test_daterange_pandas_compatibility(): with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.date_range("20010101", "20020215", freq="400h", name="times") expected = pd.date_range( "2010-01-01", "2010-02-01", periods=10, name="times" ) actual = cudf.date_range( "2010-01-01", "2010-02-01", periods=10, name="times" ) assert_eq(expected, actual) def test_strings_with_utc_offset_not_implemented(): with pytest.warns(DeprecationWarning, match="parsing timezone"): # cupy with pytest.raises(NotImplementedError): DatetimeIndex(["2022-07-22 00:00:00+02:00"]) @pytest.mark.parametrize("code", ["z", "Z"]) def test_format_timezone_not_implemented(code): with pytest.raises(NotImplementedError): cudf.to_datetime( ["2020-01-01 00:00:00 UTC"], format=f"%Y-%m-%d %H:%M:%S %{code}" ) @pytest.mark.parametrize("tz", ["Z", "UTC-3", "+01:00"]) def test_no_format_timezone_not_implemented(tz): with pytest.raises(NotImplementedError): cudf.to_datetime([f"2020-01-01 00:00:00{tz}"]) @pytest.mark.parametrize("arg", [True, False]) def test_args_not_datetime_typerror(arg): with pytest.raises(TypeError): cudf.to_datetime([arg]) @pytest.mark.parametrize( "data", [ [ "2000-01-01 00:00:00.000000000", "2000-01-01 00:00:00.000000000", "2000-01-01 00:00:00.000000000", ], [ "2000-01-01 00:00:00.000000000", None, "2000-01-01 00:00:00.000000000", ], [ "2000-01-01 00:00:00.001000000", "2000-01-01 00:00:00.000000000", "2000-01-01 00:00:00.000000000", ], [ 
"2000-01-01 00:00:00.010000000", "2000-01-01 00:00:00.020000000", "2000-01-01 00:00:00.030000000", ], [ "2000-01-01 00:00:00.010000000", "2000-01-01 00:00:00.020000000", None, ], [ "2000-01-01 00:00:00.000001000", "2000-01-01 00:00:00.000000000", "2000-01-01 00:00:00.000004000", ], [ None, "2000-01-01 00:00:00.000000000", "2000-01-01 00:00:00.000004000", ], [ "2000-01-01 00:00:00.000000010", "2000-01-01 00:00:00.000000002", "2000-01-01 00:00:00.000000000", ], [ "2000-01-01 00:00:00.000000010", None, "2000-01-01 00:00:00.000000000", ], [ "2000-01-01 00:00:01.000000000", "2000-01-01 00:00:40.000000000", "2000-01-01 00:00:59.000000000", ], [ "2000-01-01 00:10:00.000000000", "2000-01-01 00:30:40.000000000", "2000-01-01 00:59:00.000000000", ], [ "2000-01-01 07:00:00.000000000", "2000-01-01 08:00:00.000000000", None, ], [None, None, None], [], [ "2000-01-01 00:10:00.123456789", "2000-01-01 00:30:40.123123456", "2000-01-01 00:59:00.675347634", ], ], ) @pytest.mark.parametrize("dtype", DATETIME_TYPES) def test_datetime_to_str(data, dtype): gs = cudf.Series(data, dtype=dtype) ps = gs.to_pandas() with cudf.option_context("mode.pandas_compatible", True): actual = gs.astype("str") expected = ps.astype("string") assert_eq(actual.to_pandas(nullable=True), expected) def test_dateimeindex_from_noniso_string(): data = ["20160920", "20160925"] gdti = cudf.DatetimeIndex(data) pdti = pd.DatetimeIndex(data) assert_eq(gdti, pdti) @pytest.mark.parametrize("errors", ["coerce", "ignore"]) def test_to_datetime_errors_non_scalar_not_implemented(errors): with pytest.raises(NotImplementedError): cudf.to_datetime([1, ""], unit="s", errors=errors)
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_interpolate.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. import pytest import cudf from cudf.testing._utils import assert_eq, assert_exceptions_equal @pytest.mark.parametrize( "data", [ # basics {"A": [1.0, 2.0, 3.0], "B": [4.0, 5.0, 6.0]}, {"A": [1.0, None, 3.0], "B": [4.0, None, 6.0]}, {"A": [None, 2.0, 3.0], "B": [4.0, 5.0, None]}, ], ) @pytest.mark.parametrize("method", ["linear"]) @pytest.mark.parametrize("axis", [0]) def test_interpolate_dataframe(data, method, axis): # Pandas interpolate methods do not seem to work # with nullable dtypes yet, so this method treats # NAs as NaNs # https://github.com/pandas-dev/pandas/issues/40252 gdf = cudf.DataFrame(data) pdf = gdf.to_pandas() expect = pdf.interpolate(method=method, axis=axis) got = gdf.interpolate(method=method, axis=axis) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [1.0, 2.0, 3.0], [1.0, None, 3.0], [None, 2.0, None, 4.0], [1.0, None, 3.0, None], [None, None, 3.0, 4.0], [1.0, 2.0, None, None], [None, None, None, None], [0.1, 0.2, 0.3], ], ) @pytest.mark.parametrize("method", ["linear"]) @pytest.mark.parametrize("axis", [0]) def test_interpolate_series(data, method, axis): gsr = cudf.Series(data) psr = gsr.to_pandas() expect = psr.interpolate(method=method, axis=axis) got = gsr.interpolate(method=method, axis=axis) assert_eq(expect, got, check_dtype=psr.dtype != "object") @pytest.mark.parametrize( "data,index", [([2.0, None, 4.0, None, 2.0], [1, 2, 3, 2, 1])] ) def test_interpolate_series_unsorted_index(data, index): gsr = cudf.Series(data, index=index) psr = gsr.to_pandas() expect = psr.interpolate(method="values") got = gsr.interpolate(method="values") assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [1.0, 2.0, 3.0, 4.0], [None, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, None], [None, None, 3.0, 4.0], [1.0, 2.0, None, None], [1.0, None, 3.0, None], [None, 2.0, None, 4.0], [None, None, None, None], ], ) @pytest.mark.parametrize("index", [[0, 1, 2, 3], [0, 2, 4, 6], [0, 3, 4, 9]]) @pytest.mark.parametrize("method", ["index", "values"]) def test_interpolate_series_values_or_index(data, index, method): gsr = cudf.Series(data, index=index) psr = gsr.to_pandas() expect = psr.interpolate(method=method) got = gsr.interpolate(method=method) assert_eq(expect, got, check_dtype=psr.dtype != "object") @pytest.mark.parametrize( "data,kwargs", [ ( {"A": ["a", "b", "c"], "B": ["d", "e", "f"]}, {"axis": 0, "method": "linear"}, ), ({"A": [1, 2, 3]}, {"method": "pad", "limit_direction": "backward"}), ({"A": [1, 2, 3]}, {"method": "ffill", "limit_direction": "backward"}), ({"A": [1, 2, 3]}, {"method": "bfill", "limit_direction": "forward"}), ( {"A": [1, 2, 3]}, {"method": "backfill", "limit_direction": "forward"}, ), ], ) def test_interpolate_dataframe_error_cases(data, kwargs): gsr = cudf.DataFrame(data) psr = gsr.to_pandas() assert_exceptions_equal( lfunc=psr.interpolate, rfunc=gsr.interpolate, lfunc_args_and_kwargs=([], kwargs), rfunc_args_and_kwargs=([], kwargs), )
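

# A minimal sketch of the NA-as-NaN equivalence that the comment in
# test_interpolate_dataframe relies on (illustrative only; this helper is
# not part of the suite): cudf treats nulls like NaNs during interpolation,
# so a null-holding cudf Series and a NaN-holding pandas Series should
# interpolate to the same values.
def _na_as_nan_interpolation_demo():
    import numpy as np
    import pandas as pd

    gsr = cudf.Series([1.0, None, 3.0])
    psr = pd.Series([1.0, np.nan, 3.0])
    # Both linearly fill the middle value to 2.0.
    assert_eq(gsr.interpolate(), psr.interpolate())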
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_duplicates.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import itertools import random import numpy as np import pandas as pd import pytest import cudf from cudf import concat from cudf.testing._utils import ( _create_pandas_series_float64_default, assert_eq, assert_exceptions_equal, ) # TODO: PANDAS 1.0 support # Revisit drop_duplicates() tests to update parameters like ignore_index. def assert_df(g, p): # assert_eq() with sorted index of dataframes g = g.sort_index() p = p.sort_index() return assert_eq(g, p) def assert_df2(g, p): assert g.index.dtype == p.index.dtype np.testing.assert_equal(g.index.to_numpy(), p.index) assert tuple(g.columns) == tuple(p.columns) for k in g.columns: assert g[k].dtype == p[k].dtype np.testing.assert_equal(g[k].to_numpy(), p[k]) # most tests are similar to pandas drop_duplicates @pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]]) def test_duplicated_with_misspelled_column_name(subset): df = pd.DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]}) gdf = cudf.DataFrame.from_pandas(df) assert_exceptions_equal( lfunc=df.drop_duplicates, rfunc=gdf.drop_duplicates, lfunc_args_and_kwargs=([subset],), rfunc_args_and_kwargs=([subset],), ) @pytest.mark.parametrize("keep", ["first", "last", False]) @pytest.mark.parametrize( "data", [ [1, 2, 4, 5, 6, 6], [], ["a", "b", "s", "sd", "a", "b"], pd.Series(["aaa"] * 10, dtype="object"), ], ) def test_drop_duplicates_series(data, keep): pds = _create_pandas_series_float64_default(data) gds = cudf.from_pandas(pds) assert_df(pds.drop_duplicates(keep=keep), gds.drop_duplicates(keep=keep)) pds.drop_duplicates(keep=keep, inplace=True) gds.drop_duplicates(keep=keep, inplace=True) assert_df(pds, gds) def test_drop_duplicates(): pdf = pd.DataFrame( { "AAA": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"], "B": ["one", "one", "two", "two", "two", "two", "one", "two"], "C": [1, 1, 2, 2, 2, 2, 1, 2], "D": range(8), } ) gdf = cudf.DataFrame.from_pandas(pdf) # single column result = gdf.copy() result.drop_duplicates("AAA", inplace=True) expected = pdf.copy() expected.drop_duplicates("AAA", inplace=True) assert_df(result, expected) result = gdf.drop_duplicates("AAA", keep="last") expected = pdf.drop_duplicates("AAA", keep="last") assert_df(result, expected) result = gdf.drop_duplicates("AAA", keep=False) expected = pdf.drop_duplicates("AAA", keep=False) assert_df(result, expected) assert len(result) == 0 # multi column expected = pdf.loc[[0, 1, 2, 3]] result = gdf.drop_duplicates(np.array(["AAA", "B"])) assert_df(result, expected) result = pdf.drop_duplicates(np.array(["AAA", "B"])) assert_df(result, expected) result = gdf.drop_duplicates(("AAA", "B"), keep="last") expected = pdf.drop_duplicates(("AAA", "B"), keep="last") assert_df(result, expected) result = gdf.drop_duplicates(("AAA", "B"), keep=False) expected = pdf.drop_duplicates(("AAA", "B"), keep=False) assert_df(result, expected) # consider everything df2 = gdf.loc[:, ["AAA", "B", "C"]] result = df2.drop_duplicates() # in this case only expected = df2.drop_duplicates(["AAA", "B"]) assert_df(result, expected) result = df2.drop_duplicates(keep="last") expected = df2.drop_duplicates(["AAA", "B"], keep="last") assert_df(result, expected) result = df2.drop_duplicates(keep=False) expected = df2.drop_duplicates(["AAA", "B"], keep=False) assert_df(result, expected) # integers result = gdf.drop_duplicates("C") expected = pdf.drop_duplicates("C") assert_df(result, expected) result = gdf.drop_duplicates("C", keep="last") expected = pdf.drop_duplicates("C", keep="last") 
assert_df(result, expected) gdf["E"] = gdf["C"].astype("int8") result = gdf.drop_duplicates("E") pdf["E"] = pdf["C"].astype("int8") expected = pdf.drop_duplicates("E") assert_df(result, expected) result = gdf.drop_duplicates("E", keep="last") expected = pdf.drop_duplicates("E", keep="last") assert_df(result, expected) pdf = pd.DataFrame( {"x": [7, 6, 3, 3, 4, 8, 0], "y": [0, 6, 5, 5, 9, 1, 2]} ) gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates(), pdf.drop_duplicates()) pdf = pd.DataFrame([[1, 0], [0, 2]]) gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates(), pdf.drop_duplicates()) pdf = pd.DataFrame([[-2, 0], [0, -4]]) gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates(), pdf.drop_duplicates()) x = np.iinfo(np.int64).max / 3 * 2 pdf = pd.DataFrame([[-x, x], [0, x + 4]]) gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates(), pdf.drop_duplicates()) pdf = pd.DataFrame([[-x, x], [x, x + 4]]) gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates(), pdf.drop_duplicates()) pdf = pd.DataFrame([i] * 9 for i in range(16)) pdf = pd.concat([pdf, pd.DataFrame([[1] + [0] * 8])], ignore_index=True) gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates(), pdf.drop_duplicates()) @pytest.mark.skip(reason="cudf does not support duplicate column names yet") def test_drop_duplicates_with_duplicate_column_names(): df = pd.DataFrame( [[1, 2, 5], [3, 4, 6], [3, 4, 7]], columns=["a", "a", "b"] ) df = cudf.DataFrame.from_pandas(df) result0 = df.drop_duplicates() assert_df(result0, df) result1 = df.drop_duplicates("a") expected1 = df[:2] assert_df(result1, expected1) def test_drop_duplicates_for_take_all(): pdf = pd.DataFrame( { "AAA": ["foo", "bar", "baz", "bar", "foo", "bar", "qux", "foo"], "B": ["one", "one", "two", "two", "two", "two", "one", "two"], "C": [1, 1, 2, 2, 2, 2, 1, 2], "D": range(8), } ) gdf = cudf.DataFrame.from_pandas(pdf) # single column result = gdf.drop_duplicates("AAA") expected = pdf.drop_duplicates("AAA") assert_df(result, expected) result = gdf.drop_duplicates("AAA", keep="last") expected = pdf.drop_duplicates("AAA", keep="last") assert_df(result, expected) result = gdf.drop_duplicates("AAA", keep=False) expected = pdf.drop_duplicates("AAA", keep=False) assert_df(result, expected) # multiple columns result = gdf.drop_duplicates(["AAA", "B"]) expected = pdf.drop_duplicates(["AAA", "B"]) assert_df(result, expected) result = gdf.drop_duplicates(["AAA", "B"], keep="last") expected = pdf.drop_duplicates(["AAA", "B"], keep="last") assert_df(result, expected) result = gdf.drop_duplicates(["AAA", "B"], keep=False) expected = pdf.drop_duplicates(["AAA", "B"], keep=False) assert_df(result, expected) def test_drop_duplicates_tuple(): pdf = pd.DataFrame( { ("AA", "AB"): [ "foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo", ], "B": ["one", "one", "two", "two", "two", "two", "one", "two"], "C": [1, 1, 2, 2, 2, 2, 1, 2], "D": range(8), } ) gdf = cudf.DataFrame.from_pandas(pdf) # single column result = gdf.drop_duplicates(("AA", "AB")) expected = pdf.drop_duplicates(("AA", "AB")) assert_df(result, expected) result = gdf.drop_duplicates(("AA", "AB"), keep="last") expected = pdf.drop_duplicates(("AA", "AB"), keep="last") assert_df(result, expected) result = gdf.drop_duplicates(("AA", "AB"), keep=False) expected = pdf.drop_duplicates(("AA", "AB"), keep=False) # empty df assert len(result) == 0 assert_df(result, expected) # multi column expected = pdf.drop_duplicates((("AA", "AB"), "B")) result = 
gdf.drop_duplicates((("AA", "AB"), "B")) assert_df(result, expected) @pytest.mark.parametrize( "df", [ pd.DataFrame(), pd.DataFrame(columns=[]), pd.DataFrame(columns=["A", "B", "C"]), pd.DataFrame(index=[]), pd.DataFrame(index=["A", "B", "C"]), ], ) def test_drop_duplicates_empty(df): df = cudf.DataFrame.from_pandas(df) result = df.drop_duplicates() assert_df(result, df) result = df.copy() result.drop_duplicates(inplace=True) assert_df(result, df) @pytest.mark.parametrize("num_columns", [3, 4, 5]) def test_dataframe_drop_duplicates_numeric_method(num_columns): comb = list(itertools.permutations(range(num_columns), num_columns)) shuf = list(comb) random.Random(num_columns).shuffle(shuf) def get_pdf(n_dup): # create dataframe with n_dup duplicate rows rows = comb + shuf[:n_dup] random.Random(n_dup).shuffle(rows) return pd.DataFrame(rows) for i in range(5): pdf = get_pdf(i) gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates(), pdf.drop_duplicates()) # subset columns, single columns assert_df( gdf.drop_duplicates(pdf.columns[:-1]), pdf.drop_duplicates(pdf.columns[:-1]), ) assert_df( gdf.drop_duplicates(pdf.columns[-1]), pdf.drop_duplicates(pdf.columns[-1]), ) assert_df( gdf.drop_duplicates(pdf.columns[0]), pdf.drop_duplicates(pdf.columns[0]), ) # subset columns shuffled cols = list(pdf.columns) random.Random(3).shuffle(cols) assert_df(gdf.drop_duplicates(cols), pdf.drop_duplicates(cols)) random.Random(3).shuffle(cols) assert_df(gdf.drop_duplicates(cols[:-1]), pdf.drop_duplicates(cols[:-1])) random.Random(3).shuffle(cols) assert_df(gdf.drop_duplicates(cols[-1]), pdf.drop_duplicates(cols[-1])) assert_df( gdf.drop_duplicates(cols, keep="last"), pdf.drop_duplicates(cols, keep="last"), ) def test_dataframe_drop_duplicates_method(): pdf = pd.DataFrame( [(1, 2, "a"), (2, 3, "b"), (3, 4, "c"), (2, 3, "d"), (3, 5, "c")], columns=["n1", "n2", "s1"], ) gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates(), pdf.drop_duplicates()) assert_eq( gdf.drop_duplicates("n1")["n1"].reset_index(drop=True), pdf.drop_duplicates("n1")["n1"].reset_index(drop=True), ) assert_eq( gdf.drop_duplicates("n2")["n2"].reset_index(drop=True), pdf.drop_duplicates("n2")["n2"].reset_index(drop=True), ) assert_eq( gdf.drop_duplicates("s1")["s1"].reset_index(drop=True), pdf.drop_duplicates("s1")["s1"].reset_index(drop=True), ) assert_eq( gdf.drop_duplicates("s1", keep="last")["s1"] .sort_index() .reset_index(drop=True), pdf.drop_duplicates("s1", keep="last")["s1"].reset_index(drop=True), ) assert gdf.drop_duplicates("s1", inplace=True) is None gdf = cudf.DataFrame.from_pandas(pdf) assert_df(gdf.drop_duplicates("n1"), pdf.drop_duplicates("n1")) assert_df(gdf.drop_duplicates("n2"), pdf.drop_duplicates("n2")) assert_df(gdf.drop_duplicates("s1"), pdf.drop_duplicates("s1")) assert_df( gdf.drop_duplicates(["n1", "n2"]), pdf.drop_duplicates(["n1", "n2"]) ) assert_df( gdf.drop_duplicates(["n1", "s1"]), pdf.drop_duplicates(["n1", "s1"]) ) # Test drop error assert_exceptions_equal( lfunc=pdf.drop_duplicates, rfunc=gdf.drop_duplicates, lfunc_args_and_kwargs=(["n3"],), rfunc_args_and_kwargs=(["n3"],), ) assert_exceptions_equal( lfunc=pdf.drop_duplicates, rfunc=gdf.drop_duplicates, lfunc_args_and_kwargs=([["n1", "n4", "n3"]],), rfunc_args_and_kwargs=([["n1", "n4", "n3"]],), ) def test_datetime_drop_duplicates(): date_df = cudf.DataFrame() date_df["date"] = pd.date_range("11/20/2018", periods=6, freq="D") date_df["value"] = np.random.sample(len(date_df)) df = concat([date_df, date_df[:4]]) assert_df(df[:-4], 
df.drop_duplicates()) df2 = df.reset_index() assert_df(df2[:-4], df2.drop_duplicates()) df3 = df.set_index("date") assert_df(df3[:-4], df3.drop_duplicates()) def test_drop_duplicates_NA(): # none df = pd.DataFrame( { "A": [None, None, "foo", "bar", "foo", "bar", "bar", "foo"], "B": ["one", "one", "two", "two", "two", "two", "one", "two"], "C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0], "D": range(8), } ) df = cudf.DataFrame.from_pandas(df) # single column result = df.drop_duplicates("A") expected = df.to_pandas().loc[[0, 2, 3]] assert_df(result, expected) result = df.drop_duplicates("A", keep="last") expected = df.to_pandas().loc[[1, 6, 7]] assert_df(result, expected) result = df.drop_duplicates("A", keep=False) expected = df.to_pandas().loc[[]] # empty df assert_df(result, expected) assert len(result) == 0 # multi column result = df.drop_duplicates(["A", "B"]) expected = df.to_pandas().loc[[0, 2, 3, 6]] assert_df(result, expected) result = df.drop_duplicates(["A", "B"], keep="last") expected = df.to_pandas().loc[[1, 5, 6, 7]] assert_df(result, expected) result = df.drop_duplicates(["A", "B"], keep=False) expected = df.to_pandas().loc[[6]] assert_df(result, expected) # nan df = pd.DataFrame( { "A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"], "B": ["one", "one", "two", "two", "two", "two", "one", "two"], "C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0], "D": range(8), } ) df = cudf.DataFrame.from_pandas(df) # single column result = df.drop_duplicates("C") expected = df[:2] assert_df(result, expected) result = df.drop_duplicates("C", keep="last") expected = df.to_pandas().loc[[3, 7]] assert_df(result, expected) result = df.drop_duplicates("C", keep=False) expected = df.to_pandas().loc[[]] # empty df assert_df(result, expected) assert len(result) == 0 # multi column result = df.drop_duplicates(["C", "B"]) expected = df.to_pandas().loc[[0, 1, 2, 4]] assert_df(result, expected) result = df.drop_duplicates(["C", "B"], keep="last") expected = df.to_pandas().loc[[1, 3, 6, 7]] assert_df(result, expected) result = df.drop_duplicates(["C", "B"], keep=False) expected = df.to_pandas().loc[[1]] assert_df(result, expected) def test_drop_duplicates_NA_for_take_all(): # TODO: PANDAS 1.0 support - add ignore_index for # pandas drop_duplicates calls in this function. 
# none pdf = pd.DataFrame( { "A": [None, None, "foo", "bar", "foo", "baz", "bar", "qux"], "C": [1.0, np.nan, np.nan, np.nan, 1.0, 2.0, 3, 1.0], } ) df = cudf.DataFrame.from_pandas(pdf) # single column result = df.drop_duplicates("A") expected = pdf.iloc[[0, 2, 3, 5, 7]] assert_df(result, expected) assert_df( df.drop_duplicates("A", ignore_index=True), result.reset_index(drop=True), ) result = df.drop_duplicates("A", keep="last") expected = pdf.iloc[[1, 4, 5, 6, 7]] assert_df(result, expected) assert_df( df.drop_duplicates("A", ignore_index=True, keep="last"), result.reset_index(drop=True), ) result = df.drop_duplicates("A", keep=False) expected = pdf.iloc[[5, 7]] assert_df(result, expected) assert_df( df.drop_duplicates("A", ignore_index=True, keep=False), result.reset_index(drop=True), ) # nan # single column result = df.drop_duplicates("C") expected = pdf.iloc[[0, 1, 5, 6]] assert_df(result, expected) result = df.drop_duplicates("C", keep="last") expected = pdf.iloc[[3, 5, 6, 7]] assert_df(result, expected) result = df.drop_duplicates("C", keep=False) expected = pdf.iloc[[5, 6]] assert_df(result, expected) def test_drop_duplicates_inplace(): orig = pd.DataFrame( { "A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"], "B": ["one", "one", "two", "two", "two", "two", "one", "two"], "C": [1, 1, 2, 2, 2, 2, 1, 2], "D": range(8), } ) orig = cudf.DataFrame.from_pandas(orig) # single column df = orig.copy() df.drop_duplicates("A", inplace=True) expected = orig[:2] result = df assert_df(result, expected) df = orig.copy() df.drop_duplicates("A", keep="last", inplace=True) expected = orig.loc[[6, 7]] result = df assert_df(result, expected) df = orig.copy() df.drop_duplicates("A", keep=False, inplace=True) expected = orig.loc[[]] result = df assert_df(result, expected) assert len(df) == 0 # multi column df = orig.copy() df.drop_duplicates(["A", "B"], inplace=True) expected = orig.loc[[0, 1, 2, 3]] result = df assert_df(result, expected) df = orig.copy() df.drop_duplicates(["A", "B"], keep="last", inplace=True) expected = orig.loc[[0, 5, 6, 7]] result = df assert_df(result, expected) df = orig.copy() df.drop_duplicates(["A", "B"], keep=False, inplace=True) expected = orig.loc[[0]] result = df assert_df(result, expected) # consider everything orig2 = orig.loc[:, ["A", "B", "C"]].copy() df2 = orig2.copy() df2.drop_duplicates(inplace=True) # in this case only expected = orig2.drop_duplicates(["A", "B"]) result = df2 assert_df(result, expected) df2 = orig2.copy() df2.drop_duplicates(keep="last", inplace=True) expected = orig2.drop_duplicates(["A", "B"], keep="last") result = df2 assert_df(result, expected) df2 = orig2.copy() df2.drop_duplicates(keep=False, inplace=True) expected = orig2.drop_duplicates(["A", "B"], keep=False) result = df2 assert_df(result, expected) def test_drop_duplicates_multi_index(): arrays = [ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"], ["one", "two", "one", "two", "one", "two", "one", "two"], ] idx = pd.MultiIndex.from_tuples(list(zip(*arrays)), names=["a", "b"]) pdf = pd.DataFrame(np.random.randint(0, 2, (8, 4)), index=idx) gdf = cudf.DataFrame.from_pandas(pdf) expected = pdf.drop_duplicates() result = gdf.drop_duplicates() assert_df(result.to_pandas(), expected) # FIXME: to_pandas needed until sort_index support for MultiIndex for col in gdf.columns: assert_df( gdf[col].drop_duplicates().to_pandas(), pdf[col].drop_duplicates(), ) def test_drop_duplicates_ignore_index_wrong_type(): gdf = cudf.DataFrame([1, 1, 2]) with pytest.raises(ValueError): 
gdf.drop_duplicates(ignore_index="True")
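

# A small worked example of the ``keep`` semantics exercised throughout this
# file (illustrative; this helper is not part of the suite). For the value 1
# duplicated at positions 0 and 1: keep="first" retains index 0, keep="last"
# retains index 1, and keep=False drops every occurrence, leaving index 2.
# Row order of the result is not guaranteed, which is also why assert_df
# above compares on sorted indexes.
def _keep_semantics_demo():
    gdf = cudf.DataFrame({"a": [1, 1, 2]})
    first = gdf.drop_duplicates("a", keep="first")
    last = gdf.drop_duplicates("a", keep="last")
    none = gdf.drop_duplicates("a", keep=False)
    assert sorted(first.index.to_pandas()) == [0, 2]
    assert sorted(last.index.to_pandas()) == [1, 2]
    assert sorted(none.index.to_pandas()) == [2]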
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_sparse_df.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import numpy as np from cudf import Series def test_to_dense_array(): data = np.random.random(8) mask = np.asarray([0b11010110]).astype(np.byte) sr = Series.from_masked_array(data=data, mask=mask, null_count=3) assert sr.has_nulls assert sr.null_count != len(sr) filled = sr.to_numpy(na_value=np.nan) dense = sr.dropna().to_numpy() assert dense.size < filled.size assert filled.size == len(sr)
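

# A worked note on the bitmask above (illustrative helper, not part of the
# suite; assumes the Arrow validity-bitmap convention that cudf follows):
# bits are read least-significant first, with 1 meaning "valid". 0b11010110
# read LSB-first is 0,1,1,0,1,0,1,1, so elements 0, 3 and 5 are null, which
# is where null_count=3 comes from.
def _mask_null_count_demo():
    bits = [(0b11010110 >> i) & 1 for i in range(8)]
    assert [i for i, b in enumerate(bits) if b == 0] == [0, 3, 5]
    assert bits.count(0) == 3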
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/conftest.py
# Copyright (c) 2019-2022, NVIDIA CORPORATION.

import itertools
import os
import pathlib

import cupy as cp
import numpy as np
import pytest

import rmm  # noqa: F401

import cudf
from cudf.testing._utils import assert_eq

_CURRENT_DIRECTORY = str(pathlib.Path(__file__).resolve().parent)


@pytest.fixture(scope="session")
def datadir():
    return pathlib.Path(__file__).parent / "data"


@pytest.fixture(
    params=itertools.product([0, 2, None], [0.3, None]),
    ids=lambda arg: f"n={arg[0]}-frac={arg[1]}",
)
def sample_n_frac(request):
    """
    Specific to `test_sample*` tests.
    """
    n, frac = request.param
    if n is not None and frac is not None:
        pytest.skip("Cannot specify both n and frac.")
    return n, frac


def shape_checker(expected, got):
    assert expected.shape == got.shape


def exact_checker(expected, got):
    assert_eq(expected, got)


@pytest.fixture(
    params=[
        (None, None, shape_checker),
        (42, 42, shape_checker),
        (np.random.RandomState(42), np.random.RandomState(42), exact_checker),
    ],
    ids=["None", "IntSeed", "NumpyRandomState"],
)
def random_state_tuple_axis_1(request):
    """
    Specific to `test_sample*_axis_1` tests.
    A pytest fixture of valid `random_state` parameter pairs for pandas
    and cudf. Valid parameter combinations, and what to check for each pair
    are listed below:

    pandas: None, seed(int), np.random.RandomState
    cudf: None, seed(int), np.random.RandomState
    ------
    check: shape, shape, exact result

    Each column above stands for one valid parameter combination and check.
    """
    return request.param


@pytest.fixture(
    params=[
        (None, None, shape_checker),
        (42, 42, shape_checker),
        (np.random.RandomState(42), np.random.RandomState(42), exact_checker),
        (np.random.RandomState(42), cp.random.RandomState(42), shape_checker),
    ],
    ids=["None", "IntSeed", "NumpyRandomState", "CupyRandomState"],
)
def random_state_tuple_axis_0(request):
    """
    Specific to `test_sample*_axis_0` tests.
    A pytest fixture of valid `random_state` parameter pairs for pandas
    and cudf. Valid parameter combinations, and what to check for each pair
    are listed below:

    pandas: None, seed(int), np.random.RandomState, np.random.RandomState
    cudf: None, seed(int), np.random.RandomState, cp.random.RandomState
    ------
    check: shape, shape, exact result, shape

    Each column above stands for one valid parameter combination and check.
    """
    return request.param


@pytest.fixture(params=[None, "builtin_list", "ndarray"])
def make_weights_axis_0(request):
    """Specific to `test_sample*_axis_0` tests.
    Only testing weights array that matches type with random state.
    """
    if request.param is None:
        return lambda *_: (None, None)
    elif request.param == "builtin_list":
        return lambda size, _: ([1] * size, [1] * size)
    else:

        def wrapped(size, numpy_weights_for_cudf):
            # Uniform distribution, non-normalized
            if numpy_weights_for_cudf:
                return np.ones(size), np.ones(size)
            else:
                return np.ones(size), cp.ones(size)

        return wrapped


# To set and remove the NO_EXTERNAL_ONLY_APIS environment variable we must use
# the sessionstart and sessionfinish hooks rather than a simple autouse,
# session-scope fixture because we need to set these variables before
# collection occurs: the environment variable is checked as soon as cudf is
# imported anywhere.
def pytest_sessionstart(session):
    """
    Called after the Session object has been created and
    before performing collection and entering the run test loop.
""" os.environ["NO_EXTERNAL_ONLY_APIS"] = "1" os.environ["_CUDF_TEST_ROOT"] = _CURRENT_DIRECTORY def pytest_sessionfinish(session, exitstatus): """ Called after whole test run finished, right before returning the exit status to the system. """ try: del os.environ["NO_EXTERNAL_ONLY_APIS"] del os.environ["_CUDF_TEST_ROOT"] except KeyError: pass @pytest.fixture(params=[32, 64]) def default_integer_bitwidth(request): old_default = cudf.get_option("default_integer_bitwidth") cudf.set_option("default_integer_bitwidth", request.param) yield request.param cudf.set_option("default_integer_bitwidth", old_default) @pytest.fixture(params=[32, 64]) def default_float_bitwidth(request): old_default = cudf.get_option("default_float_bitwidth") cudf.set_option("default_float_bitwidth", request.param) yield request.param cudf.set_option("default_float_bitwidth", old_default) @pytest.hookimpl(tryfirst=True, hookwrapper=True) def pytest_runtest_makereport(item, call): """Hook to make result information available in fixtures This makes it possible for a pytest.fixture to access the current test state through `request.node.report`. See the `manager` fixture in `test_spilling.py` for an example. Pytest doc: <https://docs.pytest.org/en/latest/example/simple.html> """ outcome = yield rep = outcome.get_result() # Set a report attribute for each phase of a call, which can # be "setup", "call", "teardown" setattr(item, "report", {rep.when: rep})
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_stats.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

from concurrent.futures import ThreadPoolExecutor

import cupy as cp
import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.datasets import randomdata
from cudf.testing._utils import (
    _create_cudf_series_float64_default,
    _create_pandas_series_float64_default,
    assert_eq,
    assert_exceptions_equal,
    expect_warning_if,
)

params_dtypes = [np.int32, np.uint32, np.float32, np.float64]
methods = ["min", "max", "sum", "mean", "var", "std"]

interpolation_methods = ["linear", "lower", "higher", "midpoint", "nearest"]


@pytest.mark.parametrize("method", methods)
@pytest.mark.parametrize("dtype", params_dtypes)
@pytest.mark.parametrize("skipna", [True, False])
def test_series_reductions(method, dtype, skipna):
    np.random.seed(0)
    arr = np.random.random(100)
    if np.issubdtype(dtype, np.integer):
        arr *= 100
        mask = arr > 10
    else:
        mask = arr > 0.5

    arr = arr.astype(dtype)
    if dtype in (np.float32, np.float64):
        arr[[2, 5, 14, 19, 50, 70]] = np.nan
    sr = cudf.Series(arr)
    sr[~mask] = None
    psr = sr.to_pandas()
    psr[~mask] = np.nan

    def call_test(sr, skipna):
        fn = getattr(sr, method)
        if method in ["std", "var"]:
            return fn(ddof=1, skipna=skipna)
        else:
            return fn(skipna=skipna)

    expect, got = call_test(psr, skipna=skipna), call_test(sr, skipna=skipna)

    np.testing.assert_approx_equal(expect, got)


@pytest.mark.parametrize("method", methods)
def test_series_reductions_concurrency(method):
    e = ThreadPoolExecutor(10)

    np.random.seed(0)
    srs = [cudf.Series(np.random.random(10000)) for _ in range(1)]

    def call_test(sr):
        fn = getattr(sr, method)
        if method in ["std", "var"]:
            return fn(ddof=1)
        else:
            return fn()

    def f(sr):
        return call_test(sr + 1)

    list(e.map(f, srs * 50))


@pytest.mark.parametrize("ddof", range(3))
def test_series_std(ddof):
    np.random.seed(0)
    arr = np.random.random(100) - 0.5
    sr = cudf.Series(arr)
    # Name the pandas Series ``psr`` to avoid shadowing the ``pd`` module
    # import.
    psr = sr.to_pandas()
    got = sr.std(ddof=ddof)
    expect = psr.std(ddof=ddof)
    np.testing.assert_approx_equal(expect, got)


def test_series_unique():
    for size in [10**x for x in range(5)]:
        arr = np.random.randint(low=-1, high=10, size=size)
        mask = arr != -1
        sr = cudf.Series(arr)
        sr[~mask] = None
        assert set(arr[mask]) == set(sr.unique().dropna().to_numpy())
        assert len(set(arr[mask])) == sr.nunique()


@pytest.mark.parametrize(
    "nan_as_null, dropna",
    [(True, True), (True, False), (False, True), (False, False)],
)
def test_series_nunique(nan_as_null, dropna):
    # We remove nulls as opposed to NaNs using the dropna parameter,
    # so to test against pandas we replace NaN with another discrete value
    cudf_series = cudf.Series([1, 2, 2, 3, 3], nan_as_null=nan_as_null)
    pd_series = pd.Series([1, 2, 2, 3, 3])
    expect = pd_series.nunique(dropna=dropna)
    got = cudf_series.nunique(dropna=dropna)
    assert expect == got

    cudf_series = cudf.Series(
        [1.0, 2.0, 3.0, np.nan, None], nan_as_null=nan_as_null
    )
    if nan_as_null is True:
        pd_series = pd.Series([1.0, 2.0, 3.0, np.nan, None])
    else:
        pd_series = pd.Series([1.0, 2.0, 3.0, -1.0, None])

    expect = pd_series.nunique(dropna=dropna)
    got = cudf_series.nunique(dropna=dropna)
    assert expect == got

    cudf_series = cudf.Series([1.0, np.nan, np.nan], nan_as_null=nan_as_null)
    if nan_as_null is True:
        pd_series = pd.Series([1.0, np.nan, np.nan])
    else:
        pd_series = pd.Series([1.0, -1.0, -1.0])
    expect = pd_series.nunique(dropna=dropna)
    got = cudf_series.nunique(dropna=dropna)
    assert expect == got


def test_series_scale():
    arr = pd.Series(np.random.randint(low=-10, high=10, size=100))
    sr = cudf.Series(arr)

    vmin = arr.min()
    vmax = arr.max()
    scaled = (arr - vmin) / (vmax -
vmin) assert scaled.min() == 0 assert scaled.max() == 1 assert_eq(sr.scale(), scaled) @pytest.mark.parametrize("int_method", interpolation_methods) def test_exact_quantiles(int_method): arr = np.asarray([6.8, 0.15, 3.4, 4.17, 2.13, 1.11, -1.01, 0.8, 5.7]) quant_values = [0.0, 0.25, 0.33, 0.5, 1.0] df = pd.DataFrame(arr) gdf_series = cudf.Series(arr) q1 = gdf_series.quantile( quant_values, interpolation=int_method, exact=True ) q2 = df.quantile(quant_values, interpolation=int_method) np.testing.assert_allclose( q1.to_pandas().values, np.array(q2.values).T.flatten(), rtol=1e-10 ) @pytest.mark.parametrize("int_method", interpolation_methods) def test_exact_quantiles_int(int_method): arr = np.asarray([7, 0, 3, 4, 2, 1, -1, 1, 6]) quant_values = [0.0, 0.25, 0.33, 0.5, 1.0] df = pd.DataFrame(arr) gdf_series = cudf.Series(arr) q1 = gdf_series.quantile( quant_values, interpolation=int_method, exact=True ) q2 = df.quantile(quant_values, interpolation=int_method) np.testing.assert_allclose( q1.to_pandas().values, np.array(q2.values).T.flatten(), rtol=1e-10 ) def test_approx_quantiles(): arr = np.asarray([6.8, 0.15, 3.4, 4.17, 2.13, 1.11, -1.01, 0.8, 5.7]) quant_values = [0.0, 0.25, 0.33, 0.5, 1.0] gdf_series = cudf.Series(arr) pdf_series = pd.Series(arr) q1 = gdf_series.quantile(quant_values, exact=False) q2 = pdf_series.quantile(quant_values) assert_eq(q1, q2) def test_approx_quantiles_int(): arr = np.asarray([1, 2, 3]) quant_values = [0.5] approx_results = [2] gdf_series = cudf.Series(arr) q1 = gdf_series.quantile(quant_values, exact=False) assert approx_results == q1.to_pandas().values @pytest.mark.parametrize("data", [[], [1, 2, 3, 10, 326497]]) @pytest.mark.parametrize( "q", [ [], 0.5, 1, 0.234, [0.345], [0.243, 0.5, 1], np.array([0.5, 1]), cp.array([0.5, 1]), ], ) def test_misc_quantiles(data, q): pdf_series = _create_pandas_series_float64_default(data) gdf_series = _create_cudf_series_float64_default(data) expected = pdf_series.quantile(q.get() if isinstance(q, cp.ndarray) else q) actual = gdf_series.quantile(q) assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ cudf.Series(np.random.normal(-100, 100, 1000)), cudf.Series(np.random.randint(-50, 50, 1000)), cudf.Series(np.zeros(100)), cudf.Series(np.repeat(np.nan, 100)), cudf.Series(np.array([1.123, 2.343, np.nan, 0.0])), cudf.Series( [5, 10, 53, None, np.nan, None, 12, 43, -423], nan_as_null=False ), cudf.Series([1.1032, 2.32, 43.4, 13, -312.0], index=[0, 4, 3, 19, 6]), cudf.Series([], dtype="float64"), cudf.Series([-3]), ], ) @pytest.mark.parametrize("null_flag", [False, True]) @pytest.mark.parametrize("numeric_only", [False, True]) def test_kurtosis_series(data, null_flag, numeric_only): pdata = data.to_pandas() if null_flag and len(data) > 2: data.iloc[[0, 2]] = None pdata.iloc[[0, 2]] = None got = data.kurtosis(numeric_only=numeric_only) got = got if np.isscalar(got) else got.to_numpy() expected = pdata.kurtosis(numeric_only=numeric_only) np.testing.assert_array_almost_equal(got, expected) got = data.kurt(numeric_only=numeric_only) got = got if np.isscalar(got) else got.to_numpy() expected = pdata.kurt(numeric_only=numeric_only) np.testing.assert_array_almost_equal(got, expected) @pytest.mark.parametrize("op", ["skew", "kurt"]) def test_kurt_skew_error(op): gs = cudf.Series(["ab", "cd"]) ps = gs.to_pandas() with pytest.warns(FutureWarning): assert_exceptions_equal( getattr(gs, op), getattr(ps, op), lfunc_args_and_kwargs=([], {"numeric_only": True}), rfunc_args_and_kwargs=([], {"numeric_only": True}), ) 
@pytest.mark.parametrize(
    "data",
    [
        cudf.Series(np.random.normal(-100, 100, 1000)),
        cudf.Series(np.random.randint(-50, 50, 1000)),
        cudf.Series(np.zeros(100)),
        cudf.Series(np.repeat(np.nan, 100)),
        cudf.Series(np.array([1.123, 2.343, np.nan, 0.0])),
        cudf.Series(
            [5, 10, 53, None, np.nan, None, 12, 43, -423], nan_as_null=False
        ),
        cudf.Series([1.1032, 2.32, 43.4, 13, -312.0], index=[0, 4, 3, 19, 6]),
        cudf.Series([], dtype="float64"),
        cudf.Series([-3]),
    ],
)
@pytest.mark.parametrize("null_flag", [False, True])
@pytest.mark.parametrize("numeric_only", [False, True])
def test_skew_series(data, null_flag, numeric_only):
    pdata = data.to_pandas()

    if null_flag and len(data) > 2:
        data.iloc[[0, 2]] = None
        pdata.iloc[[0, 2]] = None

    got = data.skew(numeric_only=numeric_only)
    expected = pdata.skew(numeric_only=numeric_only)
    got = got if np.isscalar(got) else got.to_numpy()
    np.testing.assert_array_almost_equal(got, expected)


@pytest.mark.parametrize("dtype", params_dtypes)
@pytest.mark.parametrize("num_na", [0, 1, 50, 99, 100])
def test_series_median(dtype, num_na):
    np.random.seed(0)
    arr = np.random.random(100)
    if np.issubdtype(dtype, np.integer):
        arr *= 100
    mask = np.arange(100) >= num_na

    arr = arr.astype(dtype)
    sr = cudf.Series(arr)
    sr[~mask] = None
    arr2 = arr[mask]
    ps = pd.Series(arr2, dtype=dtype)

    actual = sr.median(skipna=True)
    desired = ps.median(skipna=True)

    np.testing.assert_approx_equal(actual, desired)

    # Test `skipna=False` only for floats, until converting integer nulls to
    # pandas (e.g. as pd.Int64Dtype) is supported in cudf.
    if np.issubdtype(dtype, np.floating):
        ps = sr.to_pandas()
        actual = sr.median(skipna=False)
        desired = ps.median(skipna=False)
        np.testing.assert_approx_equal(actual, desired)


@pytest.mark.parametrize(
    "data",
    [
        np.random.normal(-100, 100, 1000),
        np.random.randint(-50, 50, 1000),
        np.zeros(100),
        np.array([1.123, 2.343, np.nan, 0.0]),
        np.array([-2, 3.75, 6, None, None, None, -8.5, None, 4.2]),
        cudf.Series([], dtype="float64"),
        cudf.Series([-3]),
    ],
)
@pytest.mark.parametrize("periods", range(-5, 5))
@pytest.mark.parametrize("fill_method", ["ffill", "bfill", "pad", "backfill"])
def test_series_pct_change(data, periods, fill_method):
    cs = cudf.Series(data)
    ps = cs.to_pandas()

    if np.abs(periods) <= len(cs):
        got = cs.pct_change(periods=periods, fill_method=fill_method)
        expected = ps.pct_change(periods=periods, fill_method=fill_method)
        np.testing.assert_array_almost_equal(
            got.to_numpy(na_value=np.nan), expected
        )


@pytest.mark.parametrize(
    "data1",
    [
        np.random.normal(-100, 100, 1000),
        np.random.randint(-50, 50, 1000),
        np.zeros(100),
        np.repeat(np.nan, 100),
        np.array([1.123, 2.343, np.nan, 0.0]),
        cudf.Series([5, 10, 53, None, np.nan, None], nan_as_null=False),
        cudf.Series([1.1, 2.32, 43.4], index=[0, 4, 3]),
        cudf.Series([], dtype="float64"),
        cudf.Series([-3]),
    ],
)
@pytest.mark.parametrize(
    "data2",
    [
        np.random.normal(-100, 100, 1000),
        np.random.randint(-50, 50, 1000),
        np.zeros(100),
        np.repeat(np.nan, 100),
        np.array([1.123, 2.343, np.nan, 0.0]),
        cudf.Series([1.1, 2.32, 43.4], index=[0, 500, 4000]),
        cudf.Series([5]),
    ],
)
def test_cov1d(data1, data2):
    gs1 = cudf.Series(data1)
    gs2 = cudf.Series(data2)

    ps1 = gs1.to_pandas()
    ps2 = gs2.to_pandas()

    got = gs1.cov(gs2)
    ps1_align, ps2_align = ps1.align(ps2, join="inner")
    with expect_warning_if(
        (len(ps1_align.dropna()) == 1 and len(ps2_align.dropna()) > 0)
        or (len(ps2_align.dropna()) == 1 and len(ps1_align.dropna()) > 0),
        RuntimeWarning,
    ):
        expected = ps1.cov(ps2)
    np.testing.assert_approx_equal(got, expected, significant=8)


@pytest.mark.parametrize(
    "data1",
    [
np.random.normal(-100, 100, 1000), np.random.randint(-50, 50, 1000), np.zeros(100), np.repeat(np.nan, 100), np.array([1.123, 2.343, np.nan, 0.0]), cudf.Series([5, 10, 53, None, np.nan, None], nan_as_null=False), cudf.Series([1.1032, 2.32, 43.4], index=[0, 4, 3]), cudf.Series([], dtype="float64"), cudf.Series([-3]), ], ) @pytest.mark.parametrize( "data2", [ np.random.normal(-100, 100, 1000), np.random.randint(-50, 50, 1000), np.zeros(100), np.repeat(np.nan, 100), np.array([1.123, 2.343, np.nan, 0.0]), cudf.Series([1.1, 2.32, 43.4], index=[0, 500, 4000]), cudf.Series([5]), ], ) @pytest.mark.parametrize("method", ["spearman", "pearson"]) def test_corr1d(data1, data2, method): if method == "spearman": # Pandas uses scipy.stats.spearmanr code-path pytest.importorskip("scipy") gs1 = cudf.Series(data1) gs2 = cudf.Series(data2) ps1 = gs1.to_pandas() ps2 = gs2.to_pandas() got = gs1.corr(gs2, method) ps1_align, ps2_align = ps1.align(ps2, join="inner") is_singular = ( len(ps1_align.dropna()) == 1 and len(ps2_align.dropna()) > 0 ) or (len(ps2_align.dropna()) == 1 and len(ps1_align.dropna()) > 0) is_identical = ( len(ps1_align.dropna().unique()) == 1 and len(ps2_align.dropna()) > 0 ) or ( len(ps2_align.dropna().unique()) == 1 and len(ps1_align.dropna()) > 0 ) # Pearson correlation leads to division by 0 when either sample size is 1. # Spearman allows for size 1 samples, but will error if all data in a # sample is identical since the covariance is zero and so the correlation # coefficient is not defined. cond = (is_singular and method == "pearson") or ( is_identical and not is_singular and method == "spearman" ) if method == "spearman": # SciPy has shuffled around the warning it throws a couple of times. # It's not worth the effort of conditionally importing the appropriate # warning based on the scipy version, just catching a base Warning is # good enough validation. expected_warning = Warning elif method == "pearson": expected_warning = RuntimeWarning with expect_warning_if(cond, expected_warning): expected = ps1.corr(ps2, method) np.testing.assert_approx_equal(got, expected, significant=8) @pytest.mark.parametrize("method", ["spearman", "pearson"]) def test_df_corr(method): gdf = randomdata(100, {str(x): float for x in range(50)}) pdf = gdf.to_pandas() got = gdf.corr(method) expected = pdf.corr(method) assert_eq(got, expected) @pytest.mark.parametrize( "data", [ [0.0, 1, 3, 6, np.NaN, 7, 5.0, np.nan, 5, 2, 3, -100], [np.nan] * 3, [1, 5, 3], [], ], ) @pytest.mark.parametrize( "ops", [ "mean", "min", "max", "sum", "product", "var", "std", "prod", "kurtosis", "skew", "any", "all", "cummin", "cummax", "cumsum", "cumprod", ], ) @pytest.mark.parametrize("skipna", [True, False]) def test_nans_stats(data, ops, skipna): psr = _create_pandas_series_float64_default(data) gsr = _create_cudf_series_float64_default(data, nan_as_null=False) assert_eq( getattr(psr, ops)(skipna=skipna), getattr(gsr, ops)(skipna=skipna) ) gsr = _create_cudf_series_float64_default(data, nan_as_null=False) # Since there is no concept of `nan_as_null` in pandas, # nulls will be returned in the operations. 
So only # testing for `skipna=True` when `nan_as_null=False` assert_eq(getattr(psr, ops)(skipna=True), getattr(gsr, ops)(skipna=True)) @pytest.mark.parametrize( "data", [ [0.0, 1, 3, 6, np.NaN, 7, 5.0, np.nan, 5, 2, 3, -100], [np.nan] * 3, [1, 5, 3], ], ) @pytest.mark.parametrize("ops", ["sum", "product", "prod"]) @pytest.mark.parametrize("skipna", [True, False]) @pytest.mark.parametrize("min_count", [-10, -1, 0, 1, 2, 3, 5, 10]) def test_min_count_ops(data, ops, skipna, min_count): psr = pd.Series(data) gsr = cudf.Series(data, nan_as_null=False) assert_eq( getattr(psr, ops)(skipna=skipna, min_count=min_count), getattr(gsr, ops)(skipna=skipna, min_count=min_count), ) @pytest.mark.parametrize( "gsr", [ cudf.Series([1, 2, 3, 4], dtype="datetime64[ns]"), cudf.Series([1, 2, 3, 4], dtype="timedelta64[ns]"), ], ) def test_cov_corr_invalid_dtypes(gsr): psr = gsr.to_pandas() assert_exceptions_equal( lfunc=psr.corr, rfunc=gsr.corr, lfunc_args_and_kwargs=([psr],), rfunc_args_and_kwargs=([gsr],), ) assert_exceptions_equal( lfunc=psr.cov, rfunc=gsr.cov, lfunc_args_and_kwargs=([psr],), rfunc_args_and_kwargs=([gsr],), ) @pytest.mark.parametrize( "data", [ randomdata( nrows=1000, dtypes={"a": float, "b": int, "c": float, "d": str} ), ], ) @pytest.mark.parametrize("null_flag", [False, True]) def test_kurtosis_df(data, null_flag): pdata = data.to_pandas() if null_flag and len(data) > 2: data.iloc[[0, 2]] = None pdata.iloc[[0, 2]] = None with pytest.warns(FutureWarning): got = data.kurtosis() got = got if np.isscalar(got) else got.to_numpy() with pytest.warns(FutureWarning): expected = pdata.kurtosis() np.testing.assert_array_almost_equal(got, expected) with pytest.warns(FutureWarning): got = data.kurt() got = got if np.isscalar(got) else got.to_numpy() with pytest.warns(FutureWarning): expected = pdata.kurt() np.testing.assert_array_almost_equal(got, expected) got = data.kurt(numeric_only=True) got = got if np.isscalar(got) else got.to_numpy() expected = pdata.kurt(numeric_only=True) np.testing.assert_array_almost_equal(got, expected) @pytest.mark.parametrize( "data", [ randomdata( nrows=1000, dtypes={"a": float, "b": int, "c": float, "d": str} ), ], ) @pytest.mark.parametrize("null_flag", [False, True]) def test_skew_df(data, null_flag): pdata = data.to_pandas() if null_flag and len(data) > 2: data.iloc[[0, 2]] = None pdata.iloc[[0, 2]] = None with pytest.warns(FutureWarning): got = data.skew() with pytest.warns(FutureWarning): expected = pdata.skew() got = got if np.isscalar(got) else got.to_numpy() np.testing.assert_array_almost_equal(got, expected) got = data.skew(numeric_only=True) expected = pdata.skew(numeric_only=True) got = got if np.isscalar(got) else got.to_numpy() np.testing.assert_array_almost_equal(got, expected)
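# Illustrative sketch (not part of the upstream suite): ``nan_as_null``,
# toggled throughout the tests above, decides whether NaNs are ingested as
# nulls or kept as floating-point NaN values.  Hypothetical helper name;
# assumes a CUDA-capable runtime.
def _demo_nan_as_null():
    import numpy as np

    import cudf

    data = [1.0, np.nan, 3.0]
    as_null = cudf.Series(data)  # default: NaN becomes <NA>
    as_nan = cudf.Series(data, nan_as_null=False)  # NaN kept as NaN
    assert as_null.null_count == 1
    assert as_nan.null_count == 0
    # With skipna=True both reductions drop the missing entry, mirroring
    # the pandas comparison made in test_nans_stats above.
    assert as_null.sum() == 4.0
    assert as_nan.sum(skipna=True) == 4.0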
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_column.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import cupy as cp import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf from cudf._lib.transform import mask_to_bools from cudf.core.column.column import as_column from cudf.testing._utils import assert_eq, assert_exceptions_equal from cudf.utils import dtypes as dtypeutils dtypes = sorted( list( dtypeutils.ALL_TYPES - { "datetime64[s]", "datetime64[ms]", "datetime64[us]", "timedelta64[s]", "timedelta64[ms]", "timedelta64[us]", } ) ) @pytest.fixture(params=dtypes, ids=dtypes) def pandas_input(request): dtype = request.param rng = np.random.default_rng() size = 100 def random_ints(dtype, size): dtype_min = np.iinfo(dtype).min dtype_max = np.iinfo(dtype).max return rng.integers(dtype_min, dtype_max, size=size, dtype=dtype) try: dtype = np.dtype(dtype) except TypeError: if dtype == "category": data = random_ints(np.int64, size) else: raise else: if dtype.kind == "b": data = rng.choice([False, True], size=size) elif dtype.kind in ("m", "M"): # datetime or timedelta data = random_ints(np.int64, size) elif dtype.kind == "U": # Unicode strings of integers like "12345" data = random_ints(np.int64, size).astype(dtype.str) elif dtype.kind == "f": # floats in [0.0, 1.0) data = rng.random(size=size, dtype=dtype) else: data = random_ints(dtype, size) return pd.Series(data, dtype=dtype) def str_host_view(list_of_str, to_dtype): return np.concatenate( [np.frombuffer(s.encode("utf-8"), dtype=to_dtype) for s in list_of_str] ) @pytest.mark.parametrize("offset", [0, 1, 15]) @pytest.mark.parametrize("size", [50, 10, 0]) def test_column_offset_and_size(pandas_input, offset, size): col = cudf.core.column.as_column(pandas_input) col = cudf.core.column.build_column( data=col.base_data, dtype=col.dtype, mask=col.base_mask, size=size, offset=offset, children=col.base_children, ) if cudf.api.types.is_categorical_dtype(col.dtype): assert col.size == col.codes.size assert col.size == (col.codes.data.size / col.codes.dtype.itemsize) elif cudf.api.types.is_string_dtype(col.dtype): if col.size > 0: assert col.size == (col.children[0].size - 1) assert col.size == ( (col.children[0].data.size / col.children[0].dtype.itemsize) - 1 ) else: assert col.size == (col.data.size / col.dtype.itemsize) got = cudf.Series(col) if offset is None: offset = 0 if size is None: size = 100 else: size = size + offset slicer = slice(offset, size) expect = pandas_input.iloc[slicer].reset_index(drop=True) assert_eq(expect, got) def column_slicing_test(col, offset, size, cast_to_float=False): col_slice = col.slice(offset, offset + size) series = cudf.Series(col) sliced_series = cudf.Series(col_slice) if cast_to_float: pd_series = series.astype(float).to_pandas() sliced_series = sliced_series.astype(float) else: pd_series = series.to_pandas() if cudf.api.types.is_categorical_dtype(col.dtype): # The cudf.Series is constructed from an already sliced column, whereas # the pandas.Series is constructed from the unsliced series and then # sliced, so the indexes should be different and we must ignore it. # However, we must compare these as frames, not raw arrays, because # numpy comparison of categorical values won't work. 
assert_eq( pd_series[offset : offset + size].reset_index(drop=True), sliced_series.reset_index(drop=True), ) else: assert_eq( np.asarray(pd_series[offset : offset + size]), sliced_series.to_numpy(), ) @pytest.mark.parametrize("offset", [0, 1, 15]) @pytest.mark.parametrize("size", [50, 10, 0]) def test_column_slicing(pandas_input, offset, size): col = cudf.core.column.as_column(pandas_input) column_slicing_test(col, offset, size) @pytest.mark.parametrize("offset", [0, 1, 15]) @pytest.mark.parametrize("size", [50, 10, 0]) @pytest.mark.parametrize("precision", [2, 3, 5]) @pytest.mark.parametrize("scale", [0, 1, 2]) @pytest.mark.parametrize( "decimal_type", [cudf.Decimal128Dtype, cudf.Decimal64Dtype, cudf.Decimal32Dtype], ) def test_decimal_column_slicing(offset, size, precision, scale, decimal_type): col = cudf.core.column.as_column(pd.Series(np.random.rand(1000))) col = col.astype(decimal_type(precision, scale)) column_slicing_test(col, offset, size, True) @pytest.mark.parametrize( "data", [ np.array([[23, 68, 2, 38, 9, 83, 72, 6, 98, 30]]), np.array([[1, 2], [7, 6]]), ], ) def test_column_series_multi_dim(data): with pytest.raises(ValueError): cudf.Series(data) with pytest.raises(ValueError): cudf.core.column.as_column(data) @pytest.mark.parametrize( ("data", "error"), [ ([1, "1.0", "2", -3], pa.lib.ArrowInvalid), ([np.nan, 0, "null", cp.nan], pa.lib.ArrowInvalid), ( [np.int32(4), np.float64(1.5), np.float32(1.290994), np.int8(0)], None, ), ], ) def test_column_mixed_dtype(data, error): if error is None: cudf.Series(data) else: with pytest.raises(error): cudf.Series(data) @pytest.mark.parametrize("nan_as_null", [True, False]) @pytest.mark.parametrize( "scalar", [np.nan, pd.Timedelta(days=1), pd.Timestamp(2020, 1, 1)], ids=repr, ) @pytest.mark.parametrize("size", [1, 10]) def test_as_column_scalar_with_nan(nan_as_null, scalar, size): expected = ( cudf.Series([scalar] * size, nan_as_null=nan_as_null) .dropna() .to_numpy() ) got = ( cudf.Series(as_column(scalar, length=size, nan_as_null=nan_as_null)) .dropna() .to_numpy() ) np.testing.assert_equal(expected, got) @pytest.mark.parametrize("data", [[1.1, 2.2, 3.3, 4.4], [1, 2, 3, 4]]) @pytest.mark.parametrize("dtype", ["float32", "float64"]) def test_column_series_cuda_array_dtype(data, dtype): psr = pd.Series(np.asarray(data), dtype=dtype) sr = cudf.Series(cp.asarray(data), dtype=dtype) assert_eq(psr, sr) psr = pd.Series(data, dtype=dtype) sr = cudf.Series(data, dtype=dtype) assert_eq(psr, sr) def test_column_zero_length_slice(): # see https://github.com/rapidsai/cudf/pull/4777 from numba import cuda x = cudf.DataFrame({"a": [1]}) the_column = x[1:]["a"]._column expect = np.array([], dtype="int8") got = cuda.as_cuda_array(the_column.data).copy_to_host() np.testing.assert_array_equal(expect, got) def test_column_chunked_array_creation(): pyarrow_array = pa.array([1, 2, 3] * 1000) chunked_array = pa.chunked_array(pyarrow_array) actual_column = cudf.core.column.as_column(chunked_array, dtype="float") expected_column = cudf.core.column.as_column(pyarrow_array, dtype="float") assert_eq(cudf.Series(actual_column), cudf.Series(expected_column)) actual_column = cudf.core.column.as_column(chunked_array) expected_column = cudf.core.column.as_column(pyarrow_array) assert_eq(cudf.Series(actual_column), cudf.Series(expected_column)) @pytest.mark.parametrize( "data,from_dtype,to_dtype", [ # equal size different kind (np.arange(3), "int64", "float64"), (np.arange(3), "float32", "int32"), (np.arange(1), "int64", "datetime64[ns]"), # size / 2^n should work for 
all n (np.arange(3), "int64", "int32"), (np.arange(3), "int64", "int16"), (np.arange(3), "int64", "int8"), (np.arange(3), "float64", "float32"), # evenly divides into bigger type (np.arange(8), "int8", "int64"), (np.arange(16), "int8", "int64"), (np.arange(128), "int8", "int64"), (np.arange(2), "float32", "int64"), (np.arange(8), "int8", "datetime64[ns]"), (np.arange(16), "int8", "datetime64[ns]"), ], ) def test_column_view_valid_numeric_to_numeric(data, from_dtype, to_dtype): cpu_data = np.asarray(data, dtype=from_dtype) gpu_data = as_column(data, dtype=from_dtype) cpu_data_view = cpu_data.view(to_dtype) gpu_data_view = gpu_data.view(to_dtype) expect = pd.Series(cpu_data_view, dtype=cpu_data_view.dtype) got = cudf.Series(gpu_data_view, dtype=gpu_data_view.dtype) gpu_ptr = gpu_data.data.get_ptr(mode="read") assert gpu_ptr == got._column.data.get_ptr(mode="read") assert_eq(expect, got) @pytest.mark.parametrize( "data,from_dtype,to_dtype", [ (np.arange(9), "int8", "int64"), (np.arange(3), "int8", "int16"), (np.arange(6), "int8", "float32"), (np.arange(1), "int8", "datetime64[ns]"), ], ) def test_column_view_invalid_numeric_to_numeric(data, from_dtype, to_dtype): cpu_data = np.asarray(data, dtype=from_dtype) gpu_data = as_column(data, dtype=from_dtype) assert_exceptions_equal( lfunc=cpu_data.view, rfunc=gpu_data.view, lfunc_args_and_kwargs=([to_dtype],), rfunc_args_and_kwargs=([to_dtype],), ) @pytest.mark.parametrize( "data,to_dtype", [ (["a", "b", "c"], "int8"), (["ab"], "int8"), (["ab"], "int16"), (["a", "ab", "a"], "int8"), (["abcd", "efgh"], "float32"), (["abcdefgh"], "datetime64[ns]"), ], ) def test_column_view_valid_string_to_numeric(data, to_dtype): expect = cudf.Series(cudf.Series(data)._column.view(to_dtype)) got = cudf.Series(str_host_view(data, to_dtype)) assert_eq(expect, got) def test_column_view_nulls_widths_even(): data = [1, 2, None, 4, None] expect_data = [ np.int32(val).view("float32") if val is not None else np.nan for val in data ] sr = cudf.Series(data, dtype="int32") expect = cudf.Series(expect_data, dtype="float32") got = cudf.Series(sr._column.view("float32")) assert_eq(expect, got) data = [None, 2.1, None, 5.3, 8.8] expect_data = [ np.float64(val).view("int64") if val is not None else val for val in data ] sr = cudf.Series(data, dtype="float64") expect = cudf.Series(expect_data, dtype="int64") got = cudf.Series(sr._column.view("int64")) assert_eq(expect, got) @pytest.mark.parametrize("slc", [slice(1, 5), slice(0, 4), slice(2, 4)]) def test_column_view_numeric_slice(slc): data = np.array([1, 2, 3, 4, 5], dtype="int32") sr = cudf.Series(data) expect = cudf.Series(data[slc].view("int64")) got = cudf.Series(sr._column.slice(slc.start, slc.stop).view("int64")) assert_eq(expect, got) @pytest.mark.parametrize( "slc", [slice(3, 5), slice(0, 4), slice(2, 5), slice(1, 3)] ) def test_column_view_string_slice(slc): data = ["a", "bcde", "cd", "efg", "h"] expect = cudf.Series( cudf.Series(data)._column.slice(slc.start, slc.stop).view("int8") ) got = cudf.Series(str_host_view(data[slc], "int8")) assert_eq(expect, got) @pytest.mark.parametrize( "data,expected", [ ( np.array([1, 2, 3, 4, 5], dtype="uint8"), cudf.core.column.as_column([1, 2, 3, 4, 5], dtype="uint8"), ), ( cp.array([1, 2, 3, 4, 5], dtype="uint8"), cudf.core.column.as_column([1, 2, 3, 4, 5], dtype="uint8"), ), ( cp.array([], dtype="uint8"), cudf.core.column.as_column([], dtype="uint8"), ), ( cp.array([255], dtype="uint8"), cudf.core.column.as_column([255], dtype="uint8"), ), ], ) def test_as_column_buffer(data, 
expected): actual_column = cudf.core.column.as_column( cudf.core.buffer.as_buffer(data), dtype=data.dtype ) assert_eq(cudf.Series(actual_column), cudf.Series(expected)) @pytest.mark.parametrize( "data,pyarrow_kwargs,cudf_kwargs", [ ( [100, 200, 300], {"type": pa.decimal128(3)}, {"dtype": cudf.core.dtypes.Decimal128Dtype(3, 0)}, ), ( [{"a": 1, "b": 3}, {"c": 2, "d": 4}], {}, {}, ), ( [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], {}, {}, ), ], ) def test_as_column_arrow_array(data, pyarrow_kwargs, cudf_kwargs): pyarrow_data = pa.array(data, **pyarrow_kwargs) cudf_from_pyarrow = as_column(pyarrow_data) expected = as_column(data, **cudf_kwargs) assert_eq(cudf.Series(cudf_from_pyarrow), cudf.Series(expected)) @pytest.mark.parametrize( "pd_dtype,expect_dtype", [ # TODO: Nullable float is coming (pd.StringDtype(), np.dtype("O")), (pd.UInt8Dtype(), np.dtype("uint8")), (pd.UInt16Dtype(), np.dtype("uint16")), (pd.UInt32Dtype(), np.dtype("uint32")), (pd.UInt64Dtype(), np.dtype("uint64")), (pd.Int8Dtype(), np.dtype("int8")), (pd.Int16Dtype(), np.dtype("int16")), (pd.Int32Dtype(), np.dtype("int32")), (pd.Int64Dtype(), np.dtype("int64")), (pd.BooleanDtype(), np.dtype("bool")), ], ) def test_build_df_from_nullable_pandas_dtype(pd_dtype, expect_dtype): if pd_dtype == pd.StringDtype(): data = ["a", pd.NA, "c", pd.NA, "e"] elif pd_dtype == pd.BooleanDtype(): data = [True, pd.NA, False, pd.NA, True] else: data = [1, pd.NA, 3, pd.NA, 5] pd_data = pd.DataFrame.from_dict({"a": data}, dtype=pd_dtype) gd_data = cudf.DataFrame.from_pandas(pd_data) assert gd_data["a"].dtype == expect_dtype # check mask expect_mask = [x is not pd.NA for x in pd_data["a"]] got_mask = mask_to_bools( gd_data["a"]._column.base_mask, 0, len(gd_data) ).values_host np.testing.assert_array_equal(expect_mask, got_mask) @pytest.mark.parametrize( "pd_dtype,expect_dtype", [ # TODO: Nullable float is coming (pd.StringDtype(), np.dtype("O")), (pd.UInt8Dtype(), np.dtype("uint8")), (pd.UInt16Dtype(), np.dtype("uint16")), (pd.UInt32Dtype(), np.dtype("uint32")), (pd.UInt64Dtype(), np.dtype("uint64")), (pd.Int8Dtype(), np.dtype("int8")), (pd.Int16Dtype(), np.dtype("int16")), (pd.Int32Dtype(), np.dtype("int32")), (pd.Int64Dtype(), np.dtype("int64")), (pd.BooleanDtype(), np.dtype("bool")), ], ) def test_build_series_from_nullable_pandas_dtype(pd_dtype, expect_dtype): if pd_dtype == pd.StringDtype(): data = ["a", pd.NA, "c", pd.NA, "e"] elif pd_dtype == pd.BooleanDtype(): data = [True, pd.NA, False, pd.NA, True] else: data = [1, pd.NA, 3, pd.NA, 5] pd_data = pd.Series(data, dtype=pd_dtype) gd_data = cudf.Series.from_pandas(pd_data) assert gd_data.dtype == expect_dtype # check mask expect_mask = [x is not pd.NA for x in pd_data] got_mask = mask_to_bools( gd_data._column.base_mask, 0, len(gd_data) ).values_host np.testing.assert_array_equal(expect_mask, got_mask) def test_concatenate_large_column_strings(): num_strings = 1_000_000 string_scale_f = 100 s_1 = cudf.Series(["very long string " * string_scale_f] * num_strings) s_2 = cudf.Series(["very long string " * string_scale_f] * num_strings) with pytest.raises(OverflowError): cudf.concat([s_1, s_2]) @pytest.mark.parametrize( "alias,expect_dtype", [ ("UInt8", "uint8"), ("UInt16", "uint16"), ("UInt32", "uint32"), ("UInt64", "uint64"), ("Int8", "int8"), ("Int16", "int16"), ("Int32", "int32"), ("Int64", "int64"), ("boolean", "bool"), ("Float32", "float32"), ("Float64", "float64"), ], ) @pytest.mark.parametrize( "data", [[1, 2, 0]], ) def test_astype_with_aliases(alias, expect_dtype, data): pd_data = 
pd.Series(data) gd_data = cudf.Series.from_pandas(pd_data) assert_eq(pd_data.astype(expect_dtype), gd_data.astype(alias))
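# Illustrative sketch (not part of the upstream suite): the alias test above
# relies on cudf resolving pandas' nullable-dtype names in ``astype``.  A
# minimal round trip with a hypothetical helper name:
def _demo_nullable_alias_roundtrip():
    import cudf

    sr = cudf.Series([1, 2, 0])
    # "Int64" (pandas nullable alias) and "int64" (numpy name) resolve to
    # the same underlying cudf dtype.
    assert sr.astype("Int64").dtype == sr.astype("int64").dtype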
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_extension_compilation.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. import operator import cupy as cp import numpy as np import pytest from numba import cuda, types from numba.cuda import compile_ptx from numba.np.numpy_support import from_dtype from cudf import NA from cudf.core.udf.api import Masked from cudf.core.udf.masked_typing import MaskedType from cudf.testing._utils import parametrize_numeric_dtypes_pairwise from cudf.utils._numba import _CUDFNumbaConfig arith_ops = ( operator.add, operator.sub, operator.mul, operator.truediv, operator.floordiv, operator.mod, operator.pow, ) comparison_ops = ( operator.lt, operator.le, operator.eq, operator.ne, operator.ge, operator.gt, ) unary_ops = (operator.truth,) ops = arith_ops + comparison_ops number_types = ( types.float32, types.float64, types.int8, types.int16, types.int32, types.int64, types.uint8, types.uint16, types.uint32, types.uint64, ) QUICK = False if QUICK: arith_ops = (operator.add, operator.truediv, operator.pow) number_types = (types.int32, types.float32) number_ids = tuple(str(t) for t in number_types) @pytest.mark.parametrize("op", unary_ops) @pytest.mark.parametrize("ty", number_types, ids=number_ids) def test_compile_masked_unary(op, ty): def func(x): return op(x) cc = (7, 5) ptx, resty = compile_ptx(func, (MaskedType(ty),), cc=cc, device=True) @pytest.mark.parametrize("op", arith_ops) @pytest.mark.parametrize("ty", number_types, ids=number_ids) def test_execute_masked_binary(op, ty): @cuda.jit(device=True) def func(x, y): return op(x, y) @cuda.jit def test_kernel(x, y, err): # Reference result with unmasked value u = func(x, y) # Construct masked values to test with x0, y0 = Masked(x, False), Masked(y, False) x1, y1 = Masked(x, True), Masked(y, True) # Call with masked types r0 = func(x0, y0) r1 = func(x1, y1) # Check masks are as expected, and unmasked result matches masked # result if r0.valid: # TODO: ideally, we would raise an exception here rather # than return an "error code", and that is what the # previous version of this (and below) tests did. But, # Numba kernels cannot currently use `debug=True` with # CUDA enhanced compatibility. Once a solution to that is # reached, we should switch back to raising exceptions # here. 
err[0] = 1 if not r1.valid: err[0] = 2 if u != r1.value: err[0] = 3 err = cp.asarray([0], dtype="int8") with _CUDFNumbaConfig(): test_kernel[1, 1](1, 2, err) assert err[0] == 0 @pytest.mark.parametrize("op", ops) @pytest.mark.parametrize("ty", number_types, ids=number_ids) @pytest.mark.parametrize("constant", [1, 1.5]) def test_compile_arith_masked_vs_constant(op, ty, constant): def func(x): return op(x, constant) cc = (7, 5) ptx, resty = compile_ptx(func, (MaskedType(ty),), cc=cc, device=True) assert isinstance(resty, MaskedType) # Check that the masked typing matches that of the unmasked typing um_ptx, um_resty = compile_ptx(func, (ty,), cc=cc, device=True) assert resty.value_type == um_resty @pytest.mark.parametrize("op", ops) @pytest.mark.parametrize("ty", number_types, ids=number_ids) @pytest.mark.parametrize("constant", [1, 1.5]) def test_compile_arith_constant_vs_masked(op, ty, constant): def func(x): return op(constant, x) cc = (7, 5) ptx, resty = compile_ptx(func, (MaskedType(ty),), cc=cc, device=True) assert isinstance(resty, MaskedType) @pytest.mark.parametrize("op", ops) @pytest.mark.parametrize("ty", number_types, ids=number_ids) def test_compile_arith_masked_vs_na(op, ty): def func(x): return op(x, NA) cc = (7, 5) ptx, resty = compile_ptx(func, (MaskedType(ty),), cc=cc, device=True) assert isinstance(resty, MaskedType) @pytest.mark.parametrize("op", ops) @pytest.mark.parametrize("ty", number_types, ids=number_ids) def test_compile_arith_na_vs_masked(op, ty): def func(x): return op(NA, x) cc = (7, 5) ptx, resty = compile_ptx(func, (MaskedType(ty),), cc=cc, device=True) @pytest.mark.parametrize("op", ops) @parametrize_numeric_dtypes_pairwise @pytest.mark.parametrize( "masked", ((False, True), (True, False), (True, True)), ids=("um", "mu", "mm"), ) def test_compile_arith_masked_ops(op, left_dtype, right_dtype, masked): def func(x, y): return op(x, y) cc = (7, 5) ty1 = from_dtype(np.dtype(left_dtype)) ty2 = from_dtype(np.dtype(right_dtype)) if masked[0]: ty1 = MaskedType(ty1) if masked[1]: ty2 = MaskedType(ty2) ptx, resty = compile_ptx(func, (ty1, ty2), cc=cc, device=True) def func_x_is_na(x): return x is NA def func_na_is_x(x): return NA is x @pytest.mark.parametrize("fn", (func_x_is_na, func_na_is_x)) def test_is_na(fn): valid = Masked(1, True) invalid = Masked(1, False) device_fn = cuda.jit(device=True)(fn) @cuda.jit def test_kernel(err): valid_is_na = device_fn(valid) invalid_is_na = device_fn(invalid) if valid_is_na: err[0] = 1 if not invalid_is_na: err[0] = 2 err = cp.asarray([0], dtype="int8") with _CUDFNumbaConfig(): test_kernel[1, 1](err) assert err[0] == 0 def func_lt_na(x): return x < NA def func_gt_na(x): return x > NA def func_eq_na(x): return x == NA def func_ne_na(x): return x != NA def func_ge_na(x): return x >= NA def func_le_na(x): return x <= NA def func_na_lt(x): return x < NA def func_na_gt(x): return x > NA def func_na_eq(x): return x == NA def func_na_ne(x): return x != NA def func_na_ge(x): return x >= NA def func_na_le(x): return x <= NA na_comparison_funcs = ( func_lt_na, func_gt_na, func_eq_na, func_ne_na, func_ge_na, func_le_na, func_na_lt, func_na_gt, func_na_eq, func_na_ne, func_na_ge, func_na_le, ) @pytest.mark.parametrize("fn", na_comparison_funcs) @pytest.mark.parametrize("ty", number_types, ids=number_ids) def test_na_masked_comparisons(fn, ty): device_fn = cuda.jit(device=True)(fn) @cuda.jit def test_kernel(err): unmasked = ty(1) valid_masked = Masked(unmasked, True) invalid_masked = Masked(unmasked, False) valid_cmp_na = device_fn(valid_masked) 
invalid_cmp_na = device_fn(invalid_masked) if valid_cmp_na: err[0] = 1 if invalid_cmp_na: err[0] = 2 err = cp.asarray([0], dtype="int8") with _CUDFNumbaConfig(): test_kernel[1, 1](err) assert err[0] == 0 # xfail because scalars do not yet cast for a comparison to NA @pytest.mark.xfail @pytest.mark.parametrize("fn", na_comparison_funcs) @pytest.mark.parametrize("ty", number_types, ids=number_ids) def test_na_scalar_comparisons(fn, ty): device_fn = cuda.jit(device=True)(fn) @cuda.jit def test_kernel(err): unmasked = ty(1) unmasked_cmp_na = device_fn(unmasked) if unmasked_cmp_na: err[0] = 1 err = cp.asarray([0], dtype="int8") with _CUDFNumbaConfig(): test_kernel[1, 1](err) assert err[0] == 0
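# Illustrative sketch (not part of the upstream suite): at the user level
# the Masked/NA semantics compiled above surface through
# ``DataFrame.apply`` -- arithmetic on a row with a null input yields <NA>
# in the output.  Hypothetical helper name; assumes a CUDA-capable runtime.
def _demo_masked_udf_na_propagation():
    import cudf

    def add(row):
        return row["a"] + row["b"]

    df = cudf.DataFrame({"a": [1, None, 3], "b": [4, 5, None]})
    out = df.apply(add, axis=1)
    # Rows where either operand is null produce <NA>; the rest compute.
    assert out.null_count == 2
    assert out[0] == 5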
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_string.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import json import re import urllib.parse from contextlib import ExitStack as does_not_raise from decimal import Decimal from sys import getsizeof import cupy import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf from cudf import concat from cudf.core._compat import PANDAS_GE_150 from cudf.core.column.string import StringColumn from cudf.core.index import StringIndex, as_index from cudf.testing._utils import ( DATETIME_TYPES, NUMERIC_TYPES, assert_eq, assert_exceptions_equal, ) from cudf.utils import dtypes as dtypeutils data_list = [ ["AbC", "de", "FGHI", "j", "kLm"], ["nOPq", None, "RsT", None, "uVw"], [None, None, None, None, None], ] data_id_list = ["no_nulls", "some_nulls", "all_nulls"] idx_list = [None, [10, 11, 12, 13, 14]] idx_id_list = ["None_index", "Set_index"] def raise_builder(flags, exceptions): if any(flags): return pytest.raises(exceptions) else: return does_not_raise() @pytest.fixture(params=data_list, ids=data_id_list) def data(request): return request.param @pytest.fixture(params=idx_list, ids=idx_id_list) def index(request): return request.param @pytest.fixture def ps_gs(data, index): ps = pd.Series(data, index=index, dtype="str", name="nice name") gs = cudf.Series(data, index=index, dtype="str", name="nice name") return (ps, gs) @pytest.mark.parametrize("construct", [list, np.array, pd.Series, pa.array]) def test_string_ingest(construct): expect = ["a", "a", "b", "c", "a"] data = construct(expect) got = cudf.Series(data) assert got.dtype == np.dtype("object") assert len(got) == 5 for idx, val in enumerate(expect): assert expect[idx] == got[idx] def test_string_export(ps_gs): ps, gs = ps_gs expect = ps got = gs.to_pandas() assert_eq(expect, got) expect = np.array(ps) got = gs.to_numpy() assert_eq(expect, got) expect = pa.Array.from_pandas(ps) got = gs.to_arrow() assert pa.Array.equals(expect, got) @pytest.mark.parametrize( "item", [ 0, 2, 4, slice(1, 3), [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1, 2, 3, 4, 4, 3, 2, 1, 0], np.array([0, 1, 2, 3, 4]), cupy.asarray(np.array([0, 1, 2, 3, 4])), ], ) def test_string_get_item(ps_gs, item): ps, gs = ps_gs got = gs.iloc[item] if isinstance(got, cudf.Series): got = got.to_arrow() if isinstance(item, cupy.ndarray): item = cupy.asnumpy(item) expect = ps.iloc[item] if isinstance(expect, pd.Series): expect = pa.Array.from_pandas(expect) pa.Array.equals(expect, got) else: if got is cudf.NA and expect is None: return assert expect == got @pytest.mark.parametrize( "item", [ [True] * 5, [False] * 5, np.array([True] * 5), np.array([False] * 5), cupy.asarray(np.array([True] * 5)), cupy.asarray(np.array([False] * 5)), np.random.randint(0, 2, 5).astype("bool").tolist(), np.random.randint(0, 2, 5).astype("bool"), cupy.asarray(np.random.randint(0, 2, 5).astype("bool")), ], ) def test_string_bool_mask(ps_gs, item): ps, gs = ps_gs got = gs.iloc[item] if isinstance(got, cudf.Series): got = got.to_arrow() if isinstance(item, cupy.ndarray): item = cupy.asnumpy(item) expect = ps[item] if isinstance(expect, pd.Series): expect = pa.Array.from_pandas(expect) pa.Array.equals(expect, got) else: assert expect == got @pytest.mark.parametrize("item", [0, slice(1, 3), slice(5)]) def test_string_repr(ps_gs, item): ps, gs = ps_gs got_out = gs.iloc[item] expect_out = ps.iloc[item] expect = str(expect_out) got = str(got_out) if got_out is not cudf.NA and len(got_out) > 1: expect = expect.replace("None", "<NA>") assert expect == got or (expect == "None" and got == "<NA>") 
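# Illustrative sketch (not part of the upstream suite): the repr test above
# hinges on cudf rendering missing strings as <NA> where object-dtype pandas
# prints None.  Hypothetical helper name; assumes a CUDA-capable runtime.
def _demo_null_string_repr():
    import pandas as pd

    import cudf

    data = ["a", None, "c"]
    assert "<NA>" in repr(cudf.Series(data))
    assert "None" in repr(pd.Series(data, dtype="str"))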
@pytest.mark.parametrize( "dtype", NUMERIC_TYPES + DATETIME_TYPES + ["bool", "object", "str"] ) def test_string_astype(dtype): if ( dtype.startswith("int") or dtype.startswith("uint") or dtype.startswith("long") ): data = ["1", "2", "3", "4", "5"] elif dtype.startswith("float"): data = [ "1.0", "2.0", "3.0", "4.0", None, "5.0", "nan", "-INF", "NaN", "inF", "NAn", ] elif dtype.startswith("bool"): data = ["True", "False", "True", "False", "False"] elif dtype.startswith("datetime64"): data = [ "2019-06-04T00:00:00", "2019-06-04T12:12:12", "2019-06-03T00:00:00", "2019-05-04T00:00:00", "2018-06-04T00:00:00", "1922-07-21T01:02:03", ] elif dtype == "str" or dtype == "object": data = ["ab", "cd", "ef", "gh", "ij"] ps = pd.Series(data) gs = cudf.Series(data) expect = ps.astype(dtype) got = gs.astype(dtype) assert_eq(expect, got) @pytest.mark.parametrize( "data, scale, precision", [ (["1.11", "2.22", "3.33"], 2, 3), (["111", "222", "33"], 0, 3), (["111000", "22000", "3000"], -3, 3), ([None, None, None], 0, 5), ([None, "-2345", None], 0, 5), ([], 0, 5), ], ) @pytest.mark.parametrize( "decimal_dtype", [cudf.Decimal128Dtype, cudf.Decimal64Dtype, cudf.Decimal32Dtype], ) def test_string_to_decimal(data, scale, precision, decimal_dtype): gs = cudf.Series(data, dtype="str") fp = gs.astype(decimal_dtype(scale=scale, precision=precision)) got = fp.astype("str") assert_eq(gs, got) def test_string_empty_to_decimal(): gs = cudf.Series(["", "-85", ""], dtype="str") got = gs.astype(cudf.Decimal64Dtype(scale=0, precision=5)) expected = cudf.Series( [0, -85, 0], dtype=cudf.Decimal64Dtype(scale=0, precision=5), ) assert_eq(expected, got) @pytest.mark.parametrize( "data, scale, precision", [ (["1.23", "-2.34", "3.45"], 2, 3), (["123", "-234", "345"], 0, 3), (["12300", "-400", "5000.0"], -2, 5), ([None, None, None], 0, 5), ([None, "-100", None], 0, 5), ([], 0, 5), ], ) @pytest.mark.parametrize( "decimal_dtype", [cudf.Decimal128Dtype, cudf.Decimal32Dtype, cudf.Decimal64Dtype], ) def test_string_from_decimal(data, scale, precision, decimal_dtype): decimal_data = [] for d in data: if d is None: decimal_data.append(None) else: decimal_data.append(Decimal(d)) fp = cudf.Series( decimal_data, dtype=decimal_dtype(scale=scale, precision=precision), ) gs = fp.astype("str") got = gs.astype(decimal_dtype(scale=scale, precision=precision)) assert_eq(fp, got) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + DATETIME_TYPES + ["bool", "object", "str"] ) def test_string_empty_astype(dtype): data = [] ps = pd.Series(data, dtype="str") gs = cudf.Series(data, dtype="str") expect = ps.astype(dtype) got = gs.astype(dtype) assert_eq(expect, got) @pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES + ["bool"]) def test_string_numeric_astype(dtype): if dtype.startswith("bool"): data = [1, 0, 1, 0, 1] elif ( dtype.startswith("int") or dtype.startswith("uint") or dtype.startswith("long") ): data = [1, 2, 3, 4, 5] elif dtype.startswith("float"): data = [1.0, 2.0, 3.0, 4.0, 5.0] elif dtype.startswith("datetime64"): # pandas rounds the output format based on the data # Use numpy instead # but fix '2011-01-01T00:00:00' -> '2011-01-01 00:00:00' data = [1000000001, 2000000001, 3000000001, 4000000001, 5000000001] ps = np.asarray(data, dtype=dtype).astype(str) ps = np.array([i.replace("T", " ") for i in ps]) if not dtype.startswith("datetime64"): ps = pd.Series(data, dtype=dtype) gs = cudf.Series(data, dtype=dtype) expect = pd.Series(ps.astype("str")) got = gs.astype("str") assert_eq(expect, got) @pytest.mark.parametrize("dtype", 
NUMERIC_TYPES + DATETIME_TYPES + ["bool"]) def test_string_empty_numeric_astype(dtype): data = [] if dtype.startswith("datetime64"): ps = pd.Series(data, dtype="datetime64[ns]") else: ps = pd.Series(data, dtype=dtype) gs = cudf.Series(data, dtype=dtype) expect = ps.astype("str") got = gs.astype("str") assert_eq(expect, got) def test_string_concat(): data1 = ["a", "b", "c", "d", "e"] data2 = ["f", "g", "h", "i", "j"] index = [1, 2, 3, 4, 5] ps1 = pd.Series(data1, index=index) ps2 = pd.Series(data2, index=index) gs1 = cudf.Series(data1, index=index) gs2 = cudf.Series(data2, index=index) expect = pd.concat([ps1, ps2]) got = concat([gs1, gs2]) assert_eq(expect, got) expect = ps1.str.cat(ps2) got = gs1.str.cat(gs2) assert_eq(expect, got) @pytest.mark.parametrize("ascending", [True, False]) def test_string_sort(ps_gs, ascending): ps, gs = ps_gs expect = ps.sort_values(ascending=ascending) got = gs.sort_values(ascending=ascending) assert_eq(expect, got) def test_string_len(ps_gs): ps, gs = ps_gs expect = ps.str.len() got = gs.str.len() # Can't handle nulls in Pandas so use PyArrow instead # Pandas will return as a float64 so need to typecast to int32 expect = pa.array(expect, from_pandas=True).cast(pa.int32()) got = got.to_arrow() assert pa.Array.equals(expect, got) def _cat_convert_seq_to_cudf(others): pd_others = others if isinstance(pd_others, (pd.Series, pd.Index)): gd_others = cudf.from_pandas(pd_others) else: gd_others = pd_others if isinstance(gd_others, (list, tuple)): temp_tuple = [ cudf.from_pandas(elem) if isinstance(elem, (pd.Series, pd.Index)) else elem for elem in gd_others ] if isinstance(gd_others, tuple): gd_others = tuple(temp_tuple) else: gd_others = list(temp_tuple) return gd_others @pytest.mark.parametrize( "others", [ None, ["f", "g", "h", "i", "j"], ("f", "g", "h", "i", "j"), pd.Series(["f", "g", "h", "i", "j"]), pd.Series(["AbC", "de", "FGHI", "j", "kLm"]), pd.Index(["f", "g", "h", "i", "j"]), pd.Index(["AbC", "de", "FGHI", "j", "kLm"]), ( np.array(["f", "g", "h", "i", "j"]), np.array(["f", "g", "h", "i", "j"]), ), [ np.array(["f", "g", "h", "i", "j"]), np.array(["f", "g", "h", "i", "j"]), ], [ pd.Series(["f", "g", "h", "i", "j"]), pd.Series(["f", "g", "h", "i", "j"]), ], ( pd.Series(["f", "g", "h", "i", "j"]), pd.Series(["f", "g", "h", "i", "j"]), ), [ pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "g", "h", "i", "j"]), ], ( pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "g", "h", "i", "j"]), ), ( pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["1", "2", "3", "4", "5"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["f", "g", "h", "i", "j"]), ), [ pd.Index(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["f", "g", "h", "i", "j"]), ], [ pd.Series(["hello", "world", "abc", "xyz", "pqr"]), pd.Series(["abc", "xyz", "hello", "pqr", "world"]), ], [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=[10, 11, 12, 13, 14], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=[10, 15, 11, 13, 14], ), ], [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=["10", "11", "12", "13", "14"], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=["10", "11", "12", "13", "14"], ), ], [ 
pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=["10", "11", "12", "13", "14"], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=["10", "15", "11", "13", "14"], ), ], [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=["1", "2", "3", "4", "5"], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=["10", "11", "12", "13", "14"], ), ], ], ) @pytest.mark.parametrize("sep", [None, "", " ", "|", ",", "|||"]) @pytest.mark.parametrize("na_rep", [None, "", "null", "a"]) @pytest.mark.parametrize( "index", [["1", "2", "3", "4", "5"]], ) def test_string_cat(ps_gs, others, sep, na_rep, index): ps, gs = ps_gs pd_others = others gd_others = _cat_convert_seq_to_cudf(others) expect = ps.str.cat(others=pd_others, sep=sep, na_rep=na_rep) got = gs.str.cat(others=gd_others, sep=sep, na_rep=na_rep) assert_eq(expect, got) ps.index = index gs.index = index expect = ps.str.cat(others=ps.index, sep=sep, na_rep=na_rep) got = gs.str.cat(others=gs.index, sep=sep, na_rep=na_rep) assert_eq(expect, got) expect = ps.str.cat(others=[ps.index] + [ps.index], sep=sep, na_rep=na_rep) got = gs.str.cat(others=[gs.index] + [gs.index], sep=sep, na_rep=na_rep) assert_eq(expect, got) expect = ps.str.cat(others=(ps.index, ps.index), sep=sep, na_rep=na_rep) got = gs.str.cat(others=(gs.index, gs.index), sep=sep, na_rep=na_rep) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ ["1", "2", "3", "4", "5"], ["a", "b", "c", "d", "e"], ["a", "b", "c", None, "e"], ], ) @pytest.mark.parametrize( "others", [ None, ["f", "g", "h", "i", "j"], ("f", "g", "h", "i", "j"), pd.Series(["f", "g", "h", "i", "j"]), pd.Series(["AbC", "de", "FGHI", "j", "kLm"]), pd.Index(["f", "g", "h", "i", "j"]), pd.Index(["AbC", "de", "FGHI", "j", "kLm"]), ( np.array(["f", "g", "h", "i", "j"]), np.array(["f", "g", "h", "i", "j"]), ), [ np.array(["f", "g", "h", "i", "j"]), np.array(["f", "g", "h", "i", "j"]), ], [ pd.Series(["f", "g", "h", "i", "j"]), pd.Series(["f", "g", "h", "i", "j"]), ], ( pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["1", "2", "3", "4", "5"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["f", "g", "h", "i", "j"]), ), [ pd.Index(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["f", "g", "h", "i", "j"]), ], [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=["a", "b", "c", "d", "e"], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=["a", "b", "c", "d", "e"], ), ], [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=[10, 11, 12, 13, 14], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=[10, 15, 11, 13, 14], ), ], [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=["1", "2", "3", "4", "5"], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=["1", "2", "3", "4", "5"], ), ], ], ) @pytest.mark.parametrize("sep", [None, "", " ", "|", ",", "|||"]) @pytest.mark.parametrize("na_rep", [None, "", "null", "a"]) @pytest.mark.parametrize("name", [None, "This is the name"]) def test_string_index_str_cat(data, others, sep, na_rep, name): pi, gi = pd.Index(data, name=name), cudf.Index(data, name=name) pd_others = others gd_others = _cat_convert_seq_to_cudf(others) expect = pi.str.cat(others=pd_others, 
sep=sep, na_rep=na_rep) got = gi.str.cat(others=gd_others, sep=sep, na_rep=na_rep) assert_eq( expect, got, exact=False, ) @pytest.mark.parametrize( "data", [["a", None, "c", None, "e"], ["a", "b", "c", "d", "a"]], ) @pytest.mark.parametrize( "others", [ None, ["f", "g", "h", "i", "j"], pd.Series(["AbC", "de", "FGHI", "j", "kLm"]), pd.Index(["f", "g", "h", "i", "j"]), pd.Index(["AbC", "de", "FGHI", "j", "kLm"]), [ np.array(["f", "g", "h", "i", "j"]), np.array(["f", "g", "h", "i", "j"]), ], [ pd.Series(["f", "g", "h", "i", "j"]), pd.Series(["f", "g", "h", "i", "j"]), ], pytest.param( [ pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "g", "h", "i", "j"]), ], marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/5862" ), ), pytest.param( ( pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), pd.Series(["f", "g", "h", "i", "j"]), np.array(["f", "a", "b", "f", "a"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["1", "2", "3", "4", "5"]), np.array(["f", "a", "b", "f", "a"]), pd.Index(["f", "g", "h", "i", "j"]), ), marks=pytest.mark.xfail( reason="https://github.com/pandas-dev/pandas/issues/33436" ), ), [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=["a", "b", "c", "d", "e"], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=["a", "b", "c", "d", "e"], ), ], [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=[10, 11, 12, 13, 14], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=[10, 15, 11, 13, 14], ), ], [ pd.Series( ["hello", "world", "abc", "xyz", "pqr"], index=["1", "2", "3", "4", "5"], ), pd.Series( ["abc", "xyz", "hello", "pqr", "world"], index=["1", "2", "3", "4", "5"], ), ], ], ) @pytest.mark.parametrize("sep", [None, "", " ", "|", ",", "|||"]) @pytest.mark.parametrize("na_rep", [None, "", "null", "a"]) @pytest.mark.parametrize("name", [None, "This is the name"]) def test_string_index_duplicate_str_cat(data, others, sep, na_rep, name): pi, gi = pd.Index(data, name=name), cudf.Index(data, name=name) pd_others = others gd_others = _cat_convert_seq_to_cudf(others) got = gi.str.cat(others=gd_others, sep=sep, na_rep=na_rep) expect = pi.str.cat(others=pd_others, sep=sep, na_rep=na_rep) # TODO: Remove got.sort_values call once we have `join` param support # in `.str.cat` # https://github.com/rapidsai/cudf/issues/5862 assert_eq( expect.sort_values() if not isinstance(expect, str) else expect, got.sort_values() if not isinstance(got, str) else got, exact=False, ) def test_string_cat_str_error(): gs = cudf.Series(["a", "v", "s"]) # https://github.com/pandas-dev/pandas/issues/28277 # ability to pass StringMethods is being removed in future. 
with pytest.raises( TypeError, match=re.escape( "others must be Series, Index, DataFrame, np.ndarrary " "or list-like (either containing only strings or " "containing only objects of type Series/Index/" "np.ndarray[1-dim])" ), ): gs.str.cat(gs.str) @pytest.mark.parametrize("sep", ["", " ", "|", ",", "|||"]) def test_string_join(ps_gs, sep): ps, gs = ps_gs expect = ps.str.join(sep) got = gs.str.join(sep) assert_eq(expect, got) @pytest.mark.parametrize("pat", [r"(a)", r"(f)", r"([a-z])", r"([A-Z])"]) @pytest.mark.parametrize("expand", [True, False]) @pytest.mark.parametrize( "flags,flags_raise", [(0, 0), (re.M | re.S, 0), (re.I, 1)] ) def test_string_extract(ps_gs, pat, expand, flags, flags_raise): ps, gs = ps_gs expectation = raise_builder([flags_raise], NotImplementedError) with expectation: expect = ps.str.extract(pat, flags=flags, expand=expand) got = gs.str.extract(pat, flags=flags, expand=expand) assert_eq(expect, got) def test_string_invalid_regex(): gs = cudf.Series(["a"]) with pytest.raises(RuntimeError): gs.str.extract(r"{\}") @pytest.mark.parametrize( "pat,regex", [ ("a", False), ("a", True), ("f", False), (r"[a-z]", True), (r"[A-Z]", True), ("hello", False), ("FGHI", False), ], ) @pytest.mark.parametrize( "flags,flags_raise", [(0, 0), (re.MULTILINE | re.DOTALL, 0), (re.I, 1), (re.I | re.DOTALL, 1)], ) @pytest.mark.parametrize("na,na_raise", [(np.nan, 0), (None, 1), ("", 1)]) def test_string_contains(ps_gs, pat, regex, flags, flags_raise, na, na_raise): ps, gs = ps_gs expectation = does_not_raise() if flags_raise or na_raise: expectation = pytest.raises(NotImplementedError) with expectation: expect = ps.str.contains(pat, flags=flags, na=na, regex=regex) got = gs.str.contains(pat, flags=flags, na=na, regex=regex) assert_eq(expect, got) def test_string_contains_case(ps_gs): ps, gs = ps_gs with pytest.raises(NotImplementedError): gs.str.contains("A", case=False) expected = ps.str.contains("A", regex=False, case=False) got = gs.str.contains("A", regex=False, case=False) assert_eq(expected, got) got = gs.str.contains("a", regex=False, case=False) assert_eq(expected, got) @pytest.mark.parametrize( "pat,esc,expect", [ ("abc", "", [True, False, False, False, False, False]), ("b%", "/", [False, True, False, False, False, False]), ("%b", ":", [False, True, False, False, False, False]), ("%b%", "*", [True, True, False, False, False, False]), ("___", "", [True, True, True, False, False, False]), ("__/%", "/", [False, False, True, False, False, False]), ("55/____", "/", [False, False, False, True, False, False]), ("%:%%", ":", [False, False, True, False, False, False]), ("55*_100", "*", [False, False, False, True, False, False]), ("abc", "abc", [True, False, False, False, False, False]), ], ) def test_string_like(pat, esc, expect): expectation = does_not_raise() if len(esc) > 1: expectation = pytest.raises(ValueError) with expectation: gs = cudf.Series(["abc", "bab", "99%", "55_100", "", "556100"]) got = gs.str.like(pat, esc) expect = cudf.Series(expect) assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize( "data", [["hello", "world", None, "", "!"]], ) @pytest.mark.parametrize( "repeats", [ 2, 0, -3, [5, 4, 3, 2, 6], [5, None, 3, 2, 6], [0, 0, 0, 0, 0], [-1, -2, -3, -4, -5], [None, None, None, None, None], ], ) def test_string_repeat(data, repeats): ps = pd.Series(data) gs = cudf.from_pandas(ps) expect = ps.str.repeat(repeats) got = gs.str.repeat(repeats) assert_eq(expect, got) # Pandas doesn't respect the `n` parameter so ignoring it in test parameters @pytest.mark.parametrize( 
"pat,regex", [("a", False), ("f", False), (r"[a-z]", True), (r"[A-Z]", True)], ) @pytest.mark.parametrize("repl", ["qwerty", "", " "]) @pytest.mark.parametrize("case,case_raise", [(None, 0), (True, 1), (False, 1)]) @pytest.mark.parametrize("flags,flags_raise", [(0, 0), (1, 1)]) def test_string_replace( ps_gs, pat, repl, case, case_raise, flags, flags_raise, regex ): ps, gs = ps_gs expectation = raise_builder([case_raise, flags_raise], NotImplementedError) with expectation: expect = ps.str.replace(pat, repl, case=case, flags=flags, regex=regex) got = gs.str.replace(pat, repl, case=case, flags=flags, regex=regex) assert_eq(expect, got) @pytest.mark.parametrize("pat", ["A*", "F?H?"]) def test_string_replace_zero_length(ps_gs, pat): ps, gs = ps_gs expect = ps.str.replace(pat, "_", regex=True) got = gs.str.replace(pat, "_", regex=True) assert_eq(expect, got) def test_string_lower(ps_gs): ps, gs = ps_gs expect = ps.str.lower() got = gs.str.lower() assert_eq(expect, got) def test_string_upper(ps_gs): ps, gs = ps_gs expect = ps.str.upper() got = gs.str.upper() assert_eq(expect, got) @pytest.mark.parametrize( "data", [ ["a b", " c ", " d", "e ", "f"], ["a-b", "-c-", "---d", "e---", "f"], ["ab", "c", "d", "e", "f"], [None, None, None, None, None], ], ) @pytest.mark.parametrize("pat", [None, " ", "-"]) @pytest.mark.parametrize("n", [-1, 0, 1, 3, 10]) @pytest.mark.parametrize("expand", [True, False]) def test_string_split(data, pat, n, expand): ps = pd.Series(data, dtype="str") gs = cudf.Series(data, dtype="str") expect = ps.str.split(pat=pat, n=n, expand=expand) got = gs.str.split(pat=pat, n=n, expand=expand) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ ["a b", " c ", " d", "e ", "f"], ["a-b", "-c-", "---d", "e---", "f"], ["ab", "c", "d", "e", "f"], [None, None, None, None, None], ], ) @pytest.mark.parametrize("pat", [None, " ", "\\-+", "\\s+"]) @pytest.mark.parametrize("n", [-1, 0, 1, 3, 10]) @pytest.mark.parametrize("expand", [True, False]) def test_string_split_re(data, pat, n, expand): ps = pd.Series(data, dtype="str") gs = cudf.Series(data, dtype="str") # Pandas does not support the regex parameter until 1.4.0 expect = ps.str.split(pat=pat, n=n, expand=expand) got = gs.str.split(pat=pat, n=n, expand=expand, regex=True) assert_eq(expect, got) @pytest.mark.parametrize( "str_data", [[], ["a", "b", "c", "d", "e"], [None, None, None, None, None]] ) @pytest.mark.parametrize("num_keys", [1, 2, 3]) def test_string_groupby_key(str_data, num_keys): other_data = [1, 2, 3, 4, 5][: len(str_data)] pdf = pd.DataFrame() gdf = cudf.DataFrame() for i in range(num_keys): pdf[i] = pd.Series(str_data, dtype="str") gdf[i] = cudf.Series(str_data, dtype="str") pdf["a"] = other_data gdf["a"] = other_data expect = pdf.groupby(list(range(num_keys)), as_index=False).count() got = gdf.groupby(list(range(num_keys)), as_index=False).count() expect = expect.sort_values([0]).reset_index(drop=True) got = got.sort_values([0]).reset_index(drop=True) assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize( "str_data", [[], ["a", "b", "c", "d", "e"], [None, None, None, None, None]] ) @pytest.mark.parametrize("num_cols", [1, 2, 3]) @pytest.mark.parametrize("agg", ["count", "max", "min"]) def test_string_groupby_non_key(str_data, num_cols, agg): other_data = [1, 2, 3, 4, 5][: len(str_data)] pdf = pd.DataFrame() gdf = cudf.DataFrame() for i in range(num_cols): pdf[i] = pd.Series(str_data, dtype="str") gdf[i] = cudf.Series(str_data, dtype="str") pdf["a"] = other_data gdf["a"] = other_data expect = 
getattr(pdf.groupby("a", as_index=False), agg)() got = getattr(gdf.groupby("a", as_index=False), agg)() expect = expect.sort_values(["a"]).reset_index(drop=True) got = got.sort_values(["a"]).reset_index(drop=True) if agg in ["min", "max"] and len(expect) == 0 and len(got) == 0: for i in range(num_cols): expect[i] = expect[i].astype("str") assert_eq(expect, got, check_dtype=False) def test_string_groupby_key_index(): str_data = ["a", "b", "c", "d", "e"] other_data = [1, 2, 3, 4, 5] pdf = pd.DataFrame() gdf = cudf.DataFrame() pdf["a"] = pd.Series(str_data, dtype="str") gdf["a"] = cudf.Series(str_data, dtype="str") pdf["b"] = other_data gdf["b"] = other_data expect = pdf.groupby("a", sort=True).count() got = gdf.groupby("a", sort=True).count() assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize("scalar", ["a", None]) def test_string_set_scalar(scalar): pdf = pd.DataFrame() pdf["a"] = [1, 2, 3, 4, 5] gdf = cudf.DataFrame.from_pandas(pdf) pdf["b"] = "a" gdf["b"] = "a" assert_eq(pdf["b"], gdf["b"]) assert_eq(pdf, gdf) def test_string_index(): pdf = pd.DataFrame(np.random.rand(5, 5)) gdf = cudf.DataFrame.from_pandas(pdf) stringIndex = ["a", "b", "c", "d", "e"] pdf.index = stringIndex gdf.index = stringIndex assert_eq(pdf, gdf) stringIndex = np.array(["a", "b", "c", "d", "e"]) pdf.index = stringIndex gdf.index = stringIndex assert_eq(pdf, gdf) with pytest.warns(FutureWarning): stringIndex = StringIndex(["a", "b", "c", "d", "e"], name="name") pdf.index = stringIndex.to_pandas() gdf.index = stringIndex assert_eq(pdf, gdf) stringIndex = cudf.Index( cudf.core.column.as_column(["a", "b", "c", "d", "e"]), name="name" ) pdf.index = stringIndex.to_pandas() gdf.index = stringIndex assert_eq(pdf, gdf) @pytest.mark.parametrize( "item", [ ["Cbe", "cbe", "CbeD", "Cb", "ghi", "Cb"], ["a", "a", "a", "a", "A"], ["A"], ["abc", "xyz", None, "ab", "123"], [None, None, "abc", None, "abc"], ], ) def test_string_unique(item): ps = pd.Series(item) gs = cudf.Series(item) # Pandas `unique` returns a numpy array pres = pd.Series(ps.unique()) # cudf returns a cudf.Series gres = gs.unique() assert_eq(pres, gres) def test_string_slice(): df = cudf.DataFrame({"a": ["hello", "world"]}) pdf = pd.DataFrame({"a": ["hello", "world"]}) a_slice_got = df.a.str.slice(0, 2) a_slice_expected = pdf.a.str.slice(0, 2) assert isinstance(a_slice_got, cudf.Series) assert_eq(a_slice_expected, a_slice_got) def test_string_equality(): data1 = ["b", "c", "d", "a", "c"] data2 = ["a", None, "c", "a", "c"] ps1 = pd.Series(data1) ps2 = pd.Series(data2) gs1 = cudf.Series(data1) gs2 = cudf.Series(data2) expect = ps1 == ps2 got = gs1 == gs2 assert_eq(expect, got.fillna(False)) expect = ps1 == "m" got = gs1 == "m" assert_eq(expect, got.fillna(False)) ps1 = pd.Series(["a"]) gs1 = cudf.Series(["a"]) expect = ps1 == "m" got = gs1 == "m" assert_eq(expect, got) @pytest.mark.parametrize( "lhs", [ ["Cbe", "cbe", "CbeD", "Cb", "ghi", "Cb"], ["abc", "xyz", "a", "ab", "123", "097"], ], ) @pytest.mark.parametrize( "rhs", [ ["Cbe", "cbe", "CbeD", "Cb", "ghi", "Cb"], ["a", "a", "a", "a", "A", "z"], ], ) def test_string_binary_op_add(lhs, rhs): pds = pd.Series(lhs) + pd.Series(rhs) gds = cudf.Series(lhs) + cudf.Series(rhs) assert_eq(pds, gds) @pytest.mark.parametrize("name", [None, "new name", 123]) def test_string_misc_name(ps_gs, name): ps, gs = ps_gs ps.name = name gs.name = name expect = ps.str.slice(0, 1) got = gs.str.slice(0, 1) assert_eq(expect, got) assert_eq(ps + ps, gs + gs) assert_eq(ps + "RAPIDS", gs + "RAPIDS") assert_eq("RAPIDS" + ps, 
"RAPIDS" + gs) def test_string_no_children_properties(): empty_col = StringColumn(children=()) assert empty_col.base_children == () assert empty_col.base_size == 0 assert empty_col.children == () assert empty_col.size == 0 assert getsizeof(empty_col) >= 0 # Accounts for Python GC overhead @pytest.mark.parametrize( "string", [ ["Cbe", "cbe", "CbeD", "Cb", "ghi", "Cb"], ["abc", "xyz", "a", "ab", "123", "097"], ["abcdefghij", "0123456789", "9876543210", None, "accénted", ""], ], ) @pytest.mark.parametrize( "index", [-100, -5, -2, -6, -1, 0, 1, 2, 3, 9, 10, 100] ) def test_string_get(string, index): pds = pd.Series(string) gds = cudf.Series(string) assert_eq( pds.str.get(index).fillna(""), gds.str.get(index).fillna(""), ) @pytest.mark.parametrize( "string", [ ["abc", "xyz", "a", "ab", "123", "097"], ["abcdefghij", "0123456789", "9876543210", None, "accénted", ""], ["koala", "fox", "chameleon"], ], ) @pytest.mark.parametrize( "number", [-10, 0, 1, 3, 10], ) @pytest.mark.parametrize( "diff", [0, 2, 5, 9], ) def test_string_slice_str(string, number, diff): pds = pd.Series(string) gds = cudf.Series(string) assert_eq(pds.str.slice(start=number), gds.str.slice(start=number)) assert_eq(pds.str.slice(stop=number), gds.str.slice(stop=number)) assert_eq(pds.str.slice(), gds.str.slice()) assert_eq( pds.str.slice(start=number, stop=number + diff), gds.str.slice(start=number, stop=number + diff), ) if diff != 0: assert_eq(pds.str.slice(step=diff), gds.str.slice(step=diff)) assert_eq( pds.str.slice(start=number, stop=number + diff, step=diff), gds.str.slice(start=number, stop=number + diff, step=diff), ) def test_string_slice_from(): gs = cudf.Series(["hello world", "holy accéntéd", "batman", None, ""]) d_starts = cudf.Series([2, 3, 0, -1, -1], dtype=np.int32) d_stops = cudf.Series([-1, -1, 0, -1, -1], dtype=np.int32) got = gs.str.slice_from(starts=d_starts._column, stops=d_stops._column) expected = cudf.Series(["llo world", "y accéntéd", "", None, ""]) assert_eq(got, expected) @pytest.mark.parametrize( "string", [ ["abc", "xyz", "a", "ab", "123", "097"], ["abcdefghij", "0123456789", "9876543210", None, "accénted", ""], ["koala", "fox", "chameleon"], ], ) @pytest.mark.parametrize("number", [0, 1, 10]) @pytest.mark.parametrize("diff", [0, 2, 9]) @pytest.mark.parametrize("repr", ["2", "!!"]) def test_string_slice_replace(string, number, diff, repr): pds = pd.Series(string) gds = cudf.Series(string) assert_eq( pds.str.slice_replace(start=number, repl=repr), gds.str.slice_replace(start=number, repl=repr), check_dtype=False, ) assert_eq( pds.str.slice_replace(stop=number, repl=repr), gds.str.slice_replace(stop=number, repl=repr), ) assert_eq(pds.str.slice_replace(), gds.str.slice_replace()) assert_eq( pds.str.slice_replace(start=number, stop=number + diff), gds.str.slice_replace(start=number, stop=number + diff), ) assert_eq( pds.str.slice_replace(start=number, stop=number + diff, repl=repr), gds.str.slice_replace(start=number, stop=number + diff, repl=repr), check_dtype=False, ) def test_string_slice_replace_fail(): gs = cudf.Series(["abc", "xyz", ""]) with pytest.raises(TypeError): gs.str.slice_replace(0, 1, ["_"]) def test_string_insert(): gs = cudf.Series(["hello world", "holy accéntéd", "batman", None, ""]) ps = pd.Series(["hello world", "holy accéntéd", "batman", None, ""]) assert_eq(gs.str.insert(0, ""), gs) assert_eq(gs.str.insert(0, "+"), "+" + ps) assert_eq(gs.str.insert(-1, "---"), ps + "---") assert_eq( gs.str.insert(5, "---"), ps.str.slice(stop=5) + "---" + ps.str.slice(start=5), ) with 
pytest.raises(TypeError): gs.str.insert(0, ["+"]) _string_char_types_data = [ ["abc", "xyz", "a", "ab", "123", "097"], ["abcdefghij", "0123456789", "9876543210", None, "accénted", ""], ["koala", "fox", "chameleon"], [ "1234567890", "de", "1.75", "-34", "+9.8", "7¼", "x³", "2³", "12⅝", "", "\t\r\n ", ], ["one", "one1", "1", ""], ["A B", "1.5", "3,000"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ["leopard", "Golden Eagle", "SNAKE", ""], [r"¯\_(ツ)_/¯", "(╯°□°)╯︵ ┻━┻", "┬─┬ノ( º _ ºノ)"], ["a1", "A1", "a!", "A!", "!1", "aA"], ] @pytest.mark.parametrize( "type_op", [ "isdecimal", "isalnum", "isalpha", "isdigit", "isnumeric", "isupper", "islower", ], ) @pytest.mark.parametrize("data", _string_char_types_data) def test_string_char_types(type_op, data): gs = cudf.Series(data) ps = pd.Series(data) assert_eq(getattr(gs.str, type_op)(), getattr(ps.str, type_op)()) def test_string_filter_alphanum(): data = ["1234567890", "!@#$%^&*()", ",./<>?;:[]}{|+=", "abc DEF"] expected = [] for st in data: rs = "" for c in st: if str.isalnum(c): rs = rs + c expected.append(rs) gs = cudf.Series(data) assert_eq(gs.str.filter_alphanum(), cudf.Series(expected)) expected = [] for st in data: rs = "" for c in st: if not str.isalnum(c): rs = rs + c expected.append(rs) assert_eq(gs.str.filter_alphanum(keep=False), cudf.Series(expected)) expected = [] for st in data: rs = "" for c in st: if str.isalnum(c): rs = rs + c else: rs = rs + "*" expected.append(rs) assert_eq(gs.str.filter_alphanum("*"), cudf.Series(expected)) expected = [] for st in data: rs = "" for c in st: if not str.isalnum(c): rs = rs + c else: rs = rs + "*" expected.append(rs) assert_eq(gs.str.filter_alphanum("*", keep=False), cudf.Series(expected)) with pytest.raises(TypeError): gs.str.filter_alphanum(["a"]) @pytest.mark.parametrize( "case_op", ["title", "capitalize", "lower", "upper", "swapcase"] ) @pytest.mark.parametrize( "data", [ *_string_char_types_data, [ None, "The quick bRoWn fox juMps over the laze DOG", '123nr98nv9rev!$#INF4390v03n1243<>?}{:-"', "accénted", ], ], ) def test_string_char_case(case_op, data): gs = cudf.Series(data) ps = pd.Series(data) s = gs.str a = getattr(s, case_op) assert_eq(a(), getattr(ps.str, case_op)()) assert_eq(gs.str.capitalize(), ps.str.capitalize()) assert_eq(gs.str.isdecimal(), ps.str.isdecimal()) assert_eq(gs.str.isalnum(), ps.str.isalnum()) assert_eq(gs.str.isalpha(), ps.str.isalpha()) assert_eq(gs.str.isdigit(), ps.str.isdigit()) assert_eq(gs.str.isnumeric(), ps.str.isnumeric()) assert_eq(gs.str.isspace(), ps.str.isspace()) assert_eq(gs.str.isempty(), ps == "") def test_string_is_title(): data = [ "leopard", "Golden Eagle", "SNAKE", "", "!A", "hello World", "A B C", "#", "AƻB", "Ⓑⓖ", "Art of War", ] gs = cudf.Series(data) ps = pd.Series(data) assert_eq(gs.str.istitle(), ps.str.istitle()) @pytest.mark.parametrize( "data", [ ["koala", "fox", "chameleon"], ["A,,B", "1,,5", "3,00,0"], ["Linda van der Berg", "George Pitt-Rivers"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ], ) def test_strings_rpartition(data): gs = cudf.Series(data) ps = pd.Series(data) assert_eq(ps.str.rpartition(), gs.str.rpartition()) assert_eq(ps.str.rpartition("-"), gs.str.rpartition("-")) assert_eq(ps.str.rpartition(","), gs.str.rpartition(",")) @pytest.mark.parametrize( "data", [ ["koala", "fox", "chameleon"], ["A,,B", "1,,5", "3,00,0"], ["Linda van der Berg", "George Pitt-Rivers"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ], ) def test_strings_partition(data): gs = cudf.Series(data, name="str_name") ps = pd.Series(data, name="str_name") 
assert_eq(ps.str.partition(), gs.str.partition()) assert_eq(ps.str.partition(","), gs.str.partition(",")) assert_eq(ps.str.partition("-"), gs.str.partition("-")) gi = as_index(data, name="new name") pi = pd.Index(data, name="new name") assert_eq(pi.str.partition(), gi.str.partition()) assert_eq(pi.str.partition(","), gi.str.partition(",")) assert_eq(pi.str.partition("-"), gi.str.partition("-")) def test_string_partition_fail(): gs = cudf.Series(["abc", "aa", "cba"]) with pytest.raises(TypeError): gs.str.partition(["a"]) with pytest.raises(TypeError): gs.str.rpartition(["a"]) @pytest.mark.parametrize( "data", [ ["koala", "fox", "chameleon"], ["A,,B", "1,,5", "3,00,0"], ["Linda van der Berg", "George Pitt-Rivers"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], [ "this is a regular sentence", "https://docs.python.org/3/tutorial/index.html", None, ], ], ) @pytest.mark.parametrize("n", [-1, 2, 1, 9]) @pytest.mark.parametrize("expand", [True, False]) def test_strings_rsplit(data, n, expand): gs = cudf.Series(data) ps = pd.Series(data) assert_eq( ps.str.rsplit(n=n, expand=expand).reset_index(), gs.str.rsplit(n=n, expand=expand).reset_index(), check_index_type=False, ) assert_eq( ps.str.rsplit(",", n=n, expand=expand), gs.str.rsplit(",", n=n, expand=expand), ) assert_eq( ps.str.rsplit("-", n=n, expand=expand), gs.str.rsplit("-", n=n, expand=expand), ) @pytest.mark.parametrize("n", [-1, 0, 1, 3, 10]) @pytest.mark.parametrize("expand", [True, False]) def test_string_rsplit_re(n, expand): data = ["a b", " c ", " d", "e ", "f"] ps = pd.Series(data, dtype="str") gs = cudf.Series(data, dtype="str") # Pandas does not yet support the regex parameter for rsplit import inspect assert ( "regex" not in inspect.signature(pd.Series.str.rsplit).parameters.keys() ) expect = ps.str.rsplit(pat=" ", n=n, expand=expand) got = gs.str.rsplit(pat="\\s", n=n, expand=expand, regex=True) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ ["koala", "fox", "chameleon"], ["A,,B", "1,,5", "3,00,0"], ["Linda van der Berg", "George Pitt-Rivers"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], [ "this is a regular sentence", "https://docs.python.org/3/tutorial/index.html", None, ], ], ) @pytest.mark.parametrize("n", [-1, 2, 1, 9]) @pytest.mark.parametrize("expand", [True, False]) def test_strings_split(data, n, expand): gs = cudf.Series(data) ps = pd.Series(data) assert_eq( ps.str.split(n=n, expand=expand).reset_index(), gs.str.split(n=n, expand=expand).reset_index(), check_index_type=False, ) assert_eq( ps.str.split(",", n=n, expand=expand), gs.str.split(",", n=n, expand=expand), ) assert_eq( ps.str.split("-", n=n, expand=expand), gs.str.split("-", n=n, expand=expand), ) @pytest.mark.parametrize( "data", [ ["koala", "fox", "chameleon"], ["A,,B", "1,,5", "3,00,0"], ["Linda van der Berg", "George Pitt-Rivers"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], [ "this is a regular sentence", "https://docs.python.org/3/tutorial/index.html", None, ], ["1. Ant. ", "2. Bee!\n", "3. Cat?\t", None], ], ) @pytest.mark.parametrize( "to_strip", ["⅕", None, "123.", ".!? \n\t", "123.!? 
\n\t", " ", ".", ","] ) def test_strings_strip_tests(data, to_strip): gs = cudf.Series(data) ps = pd.Series(data) assert_eq(ps.str.strip(to_strip=to_strip), gs.str.strip(to_strip=to_strip)) assert_eq( ps.str.rstrip(to_strip=to_strip), gs.str.rstrip(to_strip=to_strip) ) assert_eq( ps.str.lstrip(to_strip=to_strip), gs.str.lstrip(to_strip=to_strip) ) gi = as_index(data) pi = pd.Index(data) assert_eq(pi.str.strip(to_strip=to_strip), gi.str.strip(to_strip=to_strip)) assert_eq( pi.str.rstrip(to_strip=to_strip), gi.str.rstrip(to_strip=to_strip) ) assert_eq( pi.str.lstrip(to_strip=to_strip), gi.str.lstrip(to_strip=to_strip) ) def test_string_strip_fail(): gs = cudf.Series(["a", "aa", ""]) with pytest.raises(TypeError): gs.str.strip(["a"]) with pytest.raises(TypeError): gs.str.lstrip(["a"]) with pytest.raises(TypeError): gs.str.rstrip(["a"]) @pytest.mark.parametrize( "data", [ ["koala", "fox", "chameleon"], ["A,,B", "1,,5", "3,00,0"], ["Linda van der Berg", "George Pitt-Rivers"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], [ "this is a regular sentence", "https://docs.python.org/3/tutorial/index.html", None, ], ["1. Ant. ", "2. Bee!\n", "3. Cat?\t", None], ], ) @pytest.mark.parametrize("width", [0, 1, 4, 9, 100]) @pytest.mark.parametrize("fillchar", ["⅕", "1", ".", "t", " ", ","]) def test_strings_filling_tests(data, width, fillchar): gs = cudf.Series(data) ps = pd.Series(data) assert_eq( ps.str.center(width=width, fillchar=fillchar), gs.str.center(width=width, fillchar=fillchar), ) assert_eq( ps.str.ljust(width=width, fillchar=fillchar), gs.str.ljust(width=width, fillchar=fillchar), ) assert_eq( ps.str.rjust(width=width, fillchar=fillchar), gs.str.rjust(width=width, fillchar=fillchar), ) gi = as_index(data) pi = pd.Index(data) assert_eq( pi.str.center(width=width, fillchar=fillchar), gi.str.center(width=width, fillchar=fillchar), ) assert_eq( pi.str.ljust(width=width, fillchar=fillchar), gi.str.ljust(width=width, fillchar=fillchar), ) assert_eq( pi.str.rjust(width=width, fillchar=fillchar), gi.str.rjust(width=width, fillchar=fillchar), ) @pytest.mark.parametrize( "data", [ ["A,,B", "1,,5", "3,00,0"], ["Linda van der Berg", "George Pitt-Rivers"], ["³", "⅕", ""], pytest.param( ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], marks=pytest.mark.xfail( condition=not PANDAS_GE_150, reason="https://github.com/pandas-dev/pandas/issues/20868", ), ), [" ", "\t\r\n ", ""], ["1. Ant. ", "2. Bee!\n", "3. Cat?\t", None], ], ) @pytest.mark.parametrize("width", [0, 1, 4, 6, 9, 100]) def test_strings_zfill_tests(data, width): gs = cudf.Series(data) ps = pd.Series(data) assert_eq(ps.str.zfill(width=width), gs.str.zfill(width=width)) gi = as_index(data) pi = pd.Index(data) assert_eq(pi.str.zfill(width=width), gi.str.zfill(width=width)) @pytest.mark.parametrize( "data", [ ["A,,B", "1,,5", "3,00,0"], ["Linda van der Berg", "George Pitt-Rivers"], ["+23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], ["1. Ant. ", "2. Bee!\n", "3. 
Cat?\t", None], ], ) @pytest.mark.parametrize("width", [0, 1, 4, 9, 100]) @pytest.mark.parametrize( "side", ["left", "right", "both"], ) @pytest.mark.parametrize("fillchar", [" ", ".", "\n", "+", "\t"]) def test_strings_pad_tests(data, width, side, fillchar): gs = cudf.Series(data) ps = pd.Series(data) assert_eq( ps.str.pad(width=width, side=side, fillchar=fillchar), gs.str.pad(width=width, side=side, fillchar=fillchar), ) gi = as_index(data) pi = pd.Index(data) assert_eq( pi.str.pad(width=width, side=side, fillchar=fillchar), gi.str.pad(width=width, side=side, fillchar=fillchar), ) @pytest.mark.parametrize( "data", [ ["abc", "xyz", "a", "ab", "123", "097"], ["A B", "1.5", "3,000"], ["23", "³", "⅕", ""], # [" ", "\t\r\n ", ""], ["leopard", "Golden Eagle", "SNAKE", ""], ["line to be wrapped", "another line to be wrapped"], ], ) @pytest.mark.parametrize("width", [1, 4, 8, 12, 100]) def test_string_wrap(data, width): gs = cudf.Series(data) ps = pd.Series(data) assert_eq( gs.str.wrap( width=width, break_long_words=False, expand_tabs=False, replace_whitespace=True, drop_whitespace=True, break_on_hyphens=False, ), ps.str.wrap( width=width, break_long_words=False, expand_tabs=False, replace_whitespace=True, drop_whitespace=True, break_on_hyphens=False, ), ) gi = as_index(data) pi = pd.Index(data) assert_eq( gi.str.wrap( width=width, break_long_words=False, expand_tabs=False, replace_whitespace=True, drop_whitespace=True, break_on_hyphens=False, ), pi.str.wrap( width=width, break_long_words=False, expand_tabs=False, replace_whitespace=True, drop_whitespace=True, break_on_hyphens=False, ), ) @pytest.mark.parametrize( "data", [ ["abc", "xyz", "a", "ab", "123", "097"], ["A B", "1.5", "3,000"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ["$", "B", "Aab$", "$$ca", "C$B$", "cat", "cat\ndog"], ["line\nto be wrapped", "another\nline\nto be wrapped"], ], ) @pytest.mark.parametrize( "pat", ["a", " ", "\t", "another", "0", r"\$", "^line$", "line.*be", "cat$"], ) @pytest.mark.parametrize("flags", [0, re.MULTILINE, re.DOTALL]) def test_string_count(data, pat, flags): gs = cudf.Series(data) ps = pd.Series(data) assert_eq( gs.str.count(pat=pat, flags=flags), ps.str.count(pat=pat, flags=flags), check_dtype=False, ) assert_eq(as_index(gs).str.count(pat=pat), pd.Index(ps).str.count(pat=pat)) @pytest.mark.parametrize( "pat, flags", [ ("Monkey", 0), ("on", 0), ("b", 0), ("on$", 0), ("on$", re.MULTILINE), ("o.*k", re.DOTALL), ], ) def test_string_findall(pat, flags): test_data = ["Lion", "Monkey", "Rabbit", "Don\nkey"] ps = pd.Series(test_data) gs = cudf.Series(test_data) expected = ps.str.findall(pat, flags) actual = gs.str.findall(pat, flags) assert_eq(expected, actual) def test_string_replace_multi(): ps = pd.Series(["hello", "goodbye"]) gs = cudf.Series(["hello", "goodbye"]) expect = ps.str.replace("e", "E").str.replace("o", "O") got = gs.str.replace(["e", "o"], ["E", "O"]) assert_eq(expect, got) ps = pd.Series(["foo", "fuz", np.nan]) gs = cudf.Series.from_pandas(ps) expect = ps.str.replace("f.", "ba", regex=True) got = gs.str.replace(["f."], ["ba"], regex=True) assert_eq(expect, got) ps = pd.Series(["f.o", "fuz", np.nan]) gs = cudf.Series.from_pandas(ps) expect = ps.str.replace("f.", "ba", regex=False) got = gs.str.replace(["f."], ["ba"], regex=False) assert_eq(expect, got) @pytest.mark.parametrize( "find", [ "(\\d)(\\d)", "(\\d)(\\d)", "(\\d)(\\d)", "(\\d)(\\d)", "([a-z])-([a-z])", "([a-z])-([a-zé])", "([a-z])-([a-z])", "([a-z])-([a-zé])", re.compile("([A-Z])(\\d)"), ], ) @pytest.mark.parametrize( "replace", 
["\\1-\\2", "V\\2-\\1", "\\1 \\2", "\\2 \\1", "X\\1+\\2Z", "X\\1+\\2Z"], ) def test_string_replace_with_backrefs(find, replace): s = [ "A543", "Z756", "", None, "tést-string", "two-thréé four-fivé", "abcd-éfgh", "tést-string-again", ] ps = pd.Series(s) gs = cudf.Series(s) got = gs.str.replace_with_backrefs(find, replace) expected = ps.str.replace(find, replace, regex=True) assert_eq(got, expected) got = as_index(gs).str.replace_with_backrefs(find, replace) expected = pd.Index(ps).str.replace(find, replace, regex=True) assert_eq(got, expected) def test_string_table_view_creation(): data = ["hi"] * 25 + [None] * 2027 psr = pd.Series(data) gsr = cudf.Series.from_pandas(psr) expect = psr[:1] got = gsr[:1] assert_eq(expect, got) @pytest.mark.parametrize( "data", [ ["abc", "xyz", "a", "ab", "123", "097"], ["A B", "1.5", "3,000"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ["$", "B", "Aab$", "$$ca", "C$B$", "cat"], ["line to be wrapped", "another line to be wrapped"], ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], ["1. Ant. ", "2. Bee!\n", "3. Cat?\t", None], ], ) @pytest.mark.parametrize( "pat", ["", None, " ", "a", "abc", "cat", "$", "\n"], ) def test_string_starts_ends(data, pat): ps = pd.Series(data) gs = cudf.Series(data) if pat is None: assert_exceptions_equal( lfunc=ps.str.startswith, rfunc=gs.str.startswith, lfunc_args_and_kwargs=([pat],), rfunc_args_and_kwargs=([pat],), ) assert_exceptions_equal( lfunc=ps.str.endswith, rfunc=gs.str.endswith, lfunc_args_and_kwargs=([pat],), rfunc_args_and_kwargs=([pat],), ) else: assert_eq( ps.str.startswith(pat), gs.str.startswith(pat), check_dtype=False ) assert_eq( ps.str.endswith(pat), gs.str.endswith(pat), check_dtype=False ) @pytest.mark.parametrize( "data,pat", [ ( ["abc", "xyz", "a", "ab", "123", "097"], ["abc", "x", "a", "b", "3", "7"], ), (["A B", "1.5", "3,000"], ["A ", ".", ","]), (["23", "³", "⅕", ""], ["23", "³", "⅕", ""]), ([" ", "\t\r\n ", ""], ["d", "\n ", ""]), ( ["$", "B", "Aab$", "$$ca", "C$B$", "cat"], ["$", "$", "a", "<", "(", "#"], ), ( ["line to be wrapped", "another line to be wrapped"], ["another", "wrapped"], ), ( ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], ["hsdjfk", None, "ll", "+", "-", "w", "-", "én"], ), ( ["1. Ant. ", "2. Bee!\n", "3. Cat?\t", None], ["1. Ant. ", "2. Bee!\n", "3. 
Cat?\t", None], ), ], ) def test_string_starts_ends_list_like_pat(data, pat): gs = cudf.Series(data) starts_expected = [] ends_expected = [] for i in range(len(pat)): if data[i] is None: starts_expected.append(None) ends_expected.append(None) else: if pat[i] is None: starts_expected.append(False) ends_expected.append(False) else: starts_expected.append(data[i].startswith(pat[i])) ends_expected.append(data[i].endswith(pat[i])) starts_expected = pd.Series(starts_expected) ends_expected = pd.Series(ends_expected) assert_eq(starts_expected, gs.str.startswith(pat), check_dtype=False) assert_eq(ends_expected, gs.str.endswith(pat), check_dtype=False) @pytest.mark.parametrize( "data", [ ["str_foo", "str_bar", "no_prefix", "", None], ["foo_str", "bar_str", "no_suffix", "", None], ], ) def test_string_remove_suffix_prefix(data): ps = pd.Series(data) gs = cudf.Series(data) got = gs.str.removeprefix("str_") expect = ps.str.removeprefix("str_") assert_eq( expect, got, check_dtype=False, ) got = gs.str.removesuffix("_str") expect = ps.str.removesuffix("_str") assert_eq( expect, got, check_dtype=False, ) @pytest.mark.parametrize( "data", [ ["abc", "xyz", "a", "ab", "123", "097"], ["A B", "1.5", "3,000"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ["$", "B", "Aab$", "$$ca", "C$B$", "cat"], ["line to be wrapped", "another line to be wrapped"], ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], ["1. Ant. ", "2. Bee!\n", "3. Cat?\t", None], ], ) @pytest.mark.parametrize( "sub", ["", " ", "a", "abc", "cat", "$", "\n"], ) def test_string_find(data, sub): ps = pd.Series(data) gs = cudf.Series(data) got = gs.str.find(sub) expect = ps.str.find(sub) assert_eq( expect, got, check_dtype=False, ) got = gs.str.find(sub, start=1) expect = ps.str.find(sub, start=1) assert_eq( expect, got, check_dtype=False, ) got = gs.str.find(sub, end=10) expect = ps.str.find(sub, end=10) assert_eq( expect, got, check_dtype=False, ) got = gs.str.find(sub, start=2, end=10) expect = ps.str.find(sub, start=2, end=10) assert_eq( expect, got, check_dtype=False, ) got = gs.str.rfind(sub) expect = ps.str.rfind(sub) assert_eq( expect, got, check_dtype=False, ) got = gs.str.rfind(sub, start=1) expect = ps.str.rfind(sub, start=1) assert_eq( expect, got, check_dtype=False, ) got = gs.str.rfind(sub, end=10) expect = ps.str.rfind(sub, end=10) assert_eq( expect, got, check_dtype=False, ) got = gs.str.rfind(sub, start=2, end=10) expect = ps.str.rfind(sub, start=2, end=10) assert_eq( expect, got, check_dtype=False, ) @pytest.mark.parametrize( "data,sub,er", [ (["abc", "xyz", "a", "ab", "123", "097"], "a", ValueError), (["A B", "1.5", "3,000"], "abc", ValueError), (["23", "³", "⅕", ""], "⅕", ValueError), ([" ", "\t\r\n ", ""], "\n", ValueError), (["$", "B", "Aab$", "$$ca", "C$B$", "cat"], "$", ValueError), (["line to be wrapped", "another line to be wrapped"], " ", None), ( ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], "+", ValueError, ), (["line to be wrapped", "another line to be wrapped"], "", None), ], ) def test_string_str_index(data, sub, er): ps = pd.Series(data) gs = cudf.Series(data) if er is None: assert_eq(ps.str.index(sub), gs.str.index(sub), check_dtype=False) try: ps.str.index(sub) except er: pass else: assert not er try: gs.str.index(sub) except er: pass else: assert not er @pytest.mark.parametrize( "data,sub,er", [ (["abc", "xyz", "a", "ab", "123", "097"], "a", ValueError), (["A B", "1.5", "3,000"], "abc", ValueError), (["23", "³", "⅕", ""], "⅕", ValueError), ([" ", "\t\r\n ", ""], "\n", 
ValueError), (["$", "B", "Aab$", "$$ca", "C$B$", "cat"], "$", ValueError), (["line to be wrapped", "another line to be wrapped"], " ", None), ( ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], "+", ValueError, ), (["line to be wrapped", "another line to be wrapped"], "", None), ], ) def test_string_str_rindex(data, sub, er): ps = pd.Series(data) gs = cudf.Series(data) if er is None: assert_eq(ps.str.rindex(sub), gs.str.rindex(sub), check_dtype=False) assert_eq(pd.Index(ps).str.rindex(sub), as_index(gs).str.rindex(sub)) try: ps.str.rindex(sub) except er: pass else: assert not er try: gs.str.rindex(sub) except er: pass else: assert not er @pytest.mark.parametrize( "data,sub,expect", [ ( ["abc", "xyz", "a", "ab", "123", "097"], ["b", "y", "a", "c", "4", "8"], [True, True, True, False, False, False], ), ( ["A B", "1.5", "3,000", "23", "³", "⅕"], ["A B", ".", ",", "1", " ", " "], [True, True, True, False, False, False], ), ( [" ", "\t", "\r", "\f ", "\n", ""], ["", "\t", "\r", "xx", "yy", "zz"], [True, True, True, False, False, False], ), ( ["$", "B", "Aab$", "$$ca", "C$B$", "cat"], ["$", "B", "ab", "*", "@", "dog"], [True, True, True, False, False, False], ), ( ["hello", "there", "world", "-1234", None, "accént"], ["lo", "e", "o", "+1234", " ", "e"], [True, True, True, False, None, False], ), ( ["1. Ant. ", "2. Bee!\n", "3. Cat?\t", "", "x", None], ["A", "B", "C", " ", "y", "e"], [True, True, True, False, False, None], ), ], ) def test_string_contains_multi(data, sub, expect): gs = cudf.Series(data) sub = cudf.Series(sub) got = gs.str.contains(sub) expect = cudf.Series(expect) assert_eq(expect, got, check_dtype=False) # Pandas does not allow 'case' or 'flags' if 'pat' is re.Pattern # This covers contains, match, count, and replace @pytest.mark.parametrize( "pat", [re.compile("[n-z]"), re.compile("[A-Z]"), re.compile("de"), "A"], ) @pytest.mark.parametrize("repl", ["xyz", "", " "]) def test_string_compiled_re(ps_gs, pat, repl): ps, gs = ps_gs expect = ps.str.contains(pat, regex=True) got = gs.str.contains(pat, regex=True) assert_eq(expect, got) expect = ps.str.match(pat) got = gs.str.match(pat) assert_eq(expect, got) expect = ps.str.count(pat) got = gs.str.count(pat) assert_eq(expect, got, check_dtype=False) expect = ps.str.replace(pat, repl, regex=True) got = gs.str.replace(pat, repl, regex=True) assert_eq(expect, got) @pytest.mark.parametrize( "data", [ ["abc", "xyz", "a", "ab", "123", "097"], ["A B", "1.5", "3,000"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ["$", "B", "Aab$", "$$ca", "C$B$", "cat"], ["line to be wrapped", "another line to be wrapped"], ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], ["1. Ant. ", "2. Bee!\n", "3. Cat?\t", None], ], ) @pytest.mark.parametrize("pat", ["", " ", "a", "abc", "cat", "$", "\n"]) def test_string_str_match(data, pat): ps = pd.Series(data) gs = cudf.Series(data) assert_eq(ps.str.match(pat), gs.str.match(pat)) assert_eq( pd.Index(pd.Index(ps).str.match(pat)), as_index(gs).str.match(pat) ) @pytest.mark.parametrize( "data", [ ["abc", "xyz", "a", "ab", "123", "097"], ["A B", "1.5", "3,000"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ["$", "B", "Aab$", "$$ca", "C$B$", "cat"], ["line to be wrapped", "another line to be wrapped"], ["hello", "there", "world", "+1234", "-1234", None, "accént", ""], ["1. Ant. ", "2. Bee!\n", "3. 
Cat?\t", None], ], ) def test_string_str_translate(data): ps = pd.Series(data) gs = cudf.Series(data) assert_eq( ps.str.translate(str.maketrans({"a": "z"})), gs.str.translate(str.maketrans({"a": "z"})), ) assert_eq( pd.Index(ps).str.translate(str.maketrans({"a": "z"})), as_index(gs).str.translate(str.maketrans({"a": "z"})), ) assert_eq( ps.str.translate(str.maketrans({"a": "z", "i": "$", "z": "1"})), gs.str.translate(str.maketrans({"a": "z", "i": "$", "z": "1"})), ) assert_eq( pd.Index(ps).str.translate( str.maketrans({"a": "z", "i": "$", "z": "1"}) ), as_index(gs).str.translate( str.maketrans({"a": "z", "i": "$", "z": "1"}) ), ) assert_eq( ps.str.translate( str.maketrans({"+": "-", "-": "$", "?": "!", "B": "."}) ), gs.str.translate( str.maketrans({"+": "-", "-": "$", "?": "!", "B": "."}) ), ) assert_eq( pd.Index(ps).str.translate( str.maketrans({"+": "-", "-": "$", "?": "!", "B": "."}) ), as_index(gs).str.translate( str.maketrans({"+": "-", "-": "$", "?": "!", "B": "."}) ), ) assert_eq( ps.str.translate(str.maketrans({"é": "É"})), gs.str.translate(str.maketrans({"é": "É"})), ) def test_string_str_filter_characters(): data = [ "hello world", "A+B+C+D", "?!@#$%^&*()", "accént", None, "$1.50", "", ] gs = cudf.Series(data) expected = cudf.Series( ["helloworld", "ABCD", "", "accnt", None, "150", ""] ) filter = {"a": "z", "A": "Z", "0": "9"} assert_eq(expected, gs.str.filter_characters(filter)) expected = cudf.Series([" ", "+++", "?!@#$%^&*()", "é", None, "$.", ""]) assert_eq(expected, gs.str.filter_characters(filter, False)) expected = cudf.Series( ["hello world", "A B C D", " ", "acc nt", None, " 1 50", ""] ) assert_eq(expected, gs.str.filter_characters(filter, True, " ")) with pytest.raises(TypeError): gs.str.filter_characters(filter, True, ["a"]) def test_string_str_code_points(): data = [ "abc", "Def", None, "jLl", "dog and cat", "accénted", "", " 1234 ", "XYZ", ] gs = cudf.Series(data) expected = [ 97, 98, 99, 68, 101, 102, 106, 76, 108, 100, 111, 103, 32, 97, 110, 100, 32, 99, 97, 116, 97, 99, 99, 50089, 110, 116, 101, 100, 32, 49, 50, 51, 52, 32, 88, 89, 90, ] expected = cudf.Series(expected) assert_eq(expected, gs.str.code_points(), check_dtype=False) @pytest.mark.parametrize( "data", [ ["http://www.hellow.com", "/home/nvidia/nfs", "123.45 ~ABCDEF"], ["23", "³", "⅕", ""], [" ", "\t\r\n ", ""], ["$", "B", "Aab$", "$$ca", "C$B$", "cat"], ], ) def test_string_str_url_encode(data): gs = cudf.Series(data) got = gs.str.url_encode() expected = pd.Series([urllib.parse.quote(url, safe="~") for url in data]) assert_eq(expected, got) @pytest.mark.parametrize( "data", [ [ "http://www.hellow.com?k1=acc%C3%A9nted&k2=a%2F/b.c", "%2Fhome%2fnfs", "987%20ZYX", ] ], ) def test_string_str_decode_url(data): gs = cudf.Series(data) got = gs.str.url_decode() expected = pd.Series([urllib.parse.unquote(url) for url in data]) assert_eq(expected, got) @pytest.mark.parametrize( "data,dtype", [ (["0.1", "10.2", "10.876"], "float"), (["-0.1", "10.2", "+10.876"], "float"), (["1", "10.2", "10.876"], "float32"), (["+123", "6344556789", "0"], "int"), (["+123", "6344556789", "0"], "uint64"), (["+123", "6344556789", "0"], "float"), (["0.1", "-10.2", "10.876", None], "float"), ], ) @pytest.mark.parametrize("obj_type", [None, "str", "category"]) def test_string_typecast(data, obj_type, dtype): psr = pd.Series(data, dtype=obj_type) gsr = cudf.Series(data, dtype=obj_type) expect = psr.astype(dtype=dtype) actual = gsr.astype(dtype=dtype) assert_eq(expect, actual) @pytest.mark.parametrize( "data,dtype", [ (["0.1", "10.2", 
"10.876"], "int"), (["1", "10.2", "+10.876"], "int"), (["abc", "1", "2", " "], "int"), (["0.1", "10.2", "10.876"], "uint64"), (["1", "10.2", "+10.876"], "uint64"), (["abc", "1", "2", " "], "uint64"), ([" ", "0.1", "2"], "float"), ([""], "int"), ([""], "uint64"), ([" "], "float"), (["\n"], "int"), (["\n"], "uint64"), (["0.1", "-10.2", "10.876", None], "int"), (["0.1", "-10.2", "10.876", None], "uint64"), (["0.1", "-10.2", "10.876", None, "ab"], "float"), (["+", "-"], "float"), (["+", "-"], "int"), (["+", "-"], "uint64"), (["1++++", "--2"], "float"), (["1++++", "--2"], "int"), (["1++++", "--2"], "uint64"), (["++++1", "--2"], "float"), (["++++1", "--2"], "int"), (["++++1", "--2"], "uint64"), ], ) @pytest.mark.parametrize("obj_type", [None, "str", "category"]) def test_string_typecast_error(data, obj_type, dtype): psr = pd.Series(data, dtype=obj_type) gsr = cudf.Series(data, dtype=obj_type) assert_exceptions_equal( lfunc=psr.astype, rfunc=gsr.astype, lfunc_args_and_kwargs=([dtype],), rfunc_args_and_kwargs=([dtype],), ) @pytest.mark.parametrize( "data", [ ["f0:18:98:22:c2:e4", "00:00:00:00:00:00", "ff:ff:ff:ff:ff:ff"], ["f0189822c2e4", "000000000000", "ffffffffffff"], ["0xf0189822c2e4", "0x000000000000", "0xffffffffffff"], ["0Xf0189822c2e4", "0X000000000000", "0Xffffffffffff"], ], ) def test_string_hex_to_int(data): gsr = cudf.Series(data) expected = cudf.Series([263988422296292, 0, 281474976710655]) got = gsr.str.htoi() assert_eq(expected, got) got = gsr.str.hex_to_int() # alias assert_eq(expected, got) def test_string_ishex(): gsr = cudf.Series(["", None, "0x01a2b3c4d5e6f", "0789", "ABCDEF0"]) got = gsr.str.ishex() expected = cudf.Series([False, None, True, True, True]) assert_eq(expected, got) def test_string_istimestamp(): gsr = cudf.Series( [ "", None, "20201009 123456.987654AM+0100", "1920111 012345.000001", "18201235 012345.1", "20201009 250001.2", "20201009 129901.3", "20201009 123499.4", "20201009 000000.500000PM-0130", "20201009:000000.600000", "20201009 010203.700000PM-2500", "20201009 010203.800000AM+0590", "20201009 010203.900000AP-0000", ] ) got = gsr.str.istimestamp(r"%Y%m%d %H%M%S.%f%p%z") expected = cudf.Series( [ False, None, True, False, False, False, False, False, True, False, False, False, False, ] ) assert_eq(expected, got) def test_string_ip4_to_int(): gsr = cudf.Series( ["", None, "hello", "41.168.0.1", "127.0.0.1", "41.197.0.1"] ) expected = cudf.Series([0, None, 0, 698875905, 2130706433, 700776449]) got = gsr.str.ip2int() assert_eq(expected, got) got = gsr.str.ip_to_int() # alias assert_eq(expected, got) def test_string_int_to_ipv4(): gsr = cudf.Series([0, None, 0, 698875905, 2130706433, 700776449]) expected = cudf.Series( ["0.0.0.0", None, "0.0.0.0", "41.168.0.1", "127.0.0.1", "41.197.0.1"] ) got = cudf.Series(gsr._column.int2ip()) assert_eq(expected, got) def test_string_isipv4(): gsr = cudf.Series( [ "", None, "1...1", "141.168.0.1", "127.0.0.1", "1.255.0.1", "256.27.28.26", "25.257.28.26", "25.27.258.26", "25.27.28.256", "-1.0.0.0", ] ) got = gsr.str.isipv4() expected = cudf.Series( [ False, None, False, True, True, True, False, False, False, False, False, ] ) assert_eq(expected, got) @pytest.mark.parametrize( "dtype", sorted(list(dtypeutils.NUMERIC_TYPES - {"int64", "uint64"})) ) def test_string_int_to_ipv4_dtype_fail(dtype): gsr = cudf.Series([1, 2, 3, 4, 5]).astype(dtype) with pytest.raises(TypeError): gsr._column.int2ip() @pytest.mark.parametrize( "data", [ ["abc", "xyz", "pqr", "tuv"], ["aaaaaaaaaaaa"], ["aaaaaaaaaaaa", "bdfeqwert", "poiuytre"], ], ) 
@pytest.mark.parametrize( "index", [ 0, 1, 2, slice(0, 1, 2), slice(0, 5, 2), slice(-1, -2, 1), slice(-1, -2, -1), slice(-2, -1, -1), slice(-2, -1, 1), slice(0), slice(None), ], ) def test_string_str_subscriptable(data, index): psr = pd.Series(data) gsr = cudf.Series(data) assert_eq(psr.str[index], gsr.str[index]) psi = pd.Index(data) gsi = cudf.Index(data) assert_eq(psi.str[index], gsi.str[index]) @pytest.mark.parametrize( "data,expected", [ (["abc", "xyz", "pqr", "tuv"], [3, 3, 3, 3]), (["aaaaaaaaaaaa"], [12]), (["aaaaaaaaaaaa", "bdfeqwert", "poiuytre"], [12, 9, 8]), (["abc", "d", "ef"], [3, 1, 2]), (["Hello", "Bye", "Thanks 😊"], [5, 3, 11]), (["\n\t", "Bye", "Thanks 😊"], [2, 3, 11]), ], ) def test_string_str_byte_count(data, expected): sr = cudf.Series(data) expected = cudf.Series(expected, dtype="int32") actual = sr.str.byte_count() assert_eq(expected, actual) si = as_index(data) expected = as_index(expected, dtype="int32") actual = si.str.byte_count() assert_eq(expected, actual) @pytest.mark.parametrize( "data,expected", [ (["1", "2", "3", "4", "5"], [True, True, True, True, True]), ( ["1.1", "2.0", "3.2", "4.3", "5."], [False, False, False, False, False], ), ( [".12312", "213123.", ".3223.", "323423.."], [False, False, False, False], ), ([""], [False]), ( ["1..1", "+2", "++3", "4++", "-5"], [False, True, False, False, True], ), ( [ "24313345435345 ", "+2632726478", "++367293674326", "4382493264392746.237649274692++", "-578239479238469264", ], [False, True, False, False, True], ), ( ["2a2b", "a+b", "++a", "a.b++", "-b"], [False, False, False, False, False], ), ( ["2a2b", "1+3", "9.0++a", "+", "-"], [False, False, False, False, False], ), ], ) def test_str_isinteger(data, expected): sr = cudf.Series(data, dtype="str") expected = cudf.Series(expected) actual = sr.str.isinteger() assert_eq(expected, actual) sr = as_index(data) expected = as_index(expected) actual = sr.str.isinteger() assert_eq(expected, actual) @pytest.mark.parametrize( "data,expected", [ (["1", "2", "3", "4", "5"], [True, True, True, True, True]), (["1.1", "2.0", "3.2", "4.3", "5."], [True, True, True, True, True]), ([""], [False]), ( [".12312", "213123.", ".3223.", "323423.."], [True, True, False, False], ), ( ["1.00.323.1", "+2.1", "++3.30", "4.9991++", "-5.3"], [False, True, False, False, True], ), ( [ "24313345435345 ", "+2632726478", "++367293674326", "4382493264392746.237649274692++", "-578239479238469264", ], [False, True, False, False, True], ), ( [ "24313345435345.32732 ", "+2632726478.3627638276", "++0.326294632367293674326", "4382493264392746.237649274692++", "-57823947923.8469264", ], [False, True, False, False, True], ), ( ["2a2b", "a+b", "++a", "a.b++", "-b"], [False, False, False, False, False], ), ( ["2a2b", "1+3", "9.0++a", "+", "-"], [False, False, False, False, False], ), ], ) def test_str_isfloat(data, expected): sr = cudf.Series(data, dtype="str") expected = cudf.Series(expected) actual = sr.str.isfloat() assert_eq(expected, actual) sr = as_index(data) expected = as_index(expected) actual = sr.str.isfloat() assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ ["a", "b", "c", "d", "e"], ["a", "z", ".", '"', "aa", "zz"], ["aa", "zz"], ["z", "a", "zz", "aa"], ["1", "2", "3", "4", "5"], [""], ["a"], ["hello"], ["small text", "this is a larger text......"], ["👋🏻", "🔥", "🥇"], ["This is 💯", "here is a calendar", "📅"], ["", ".", ";", "[", "]"], ["\t", ".", "\n", "\n\t", "\t\n"], ], ) def test_str_min(data): psr = pd.Series(data) sr = cudf.Series(data) assert_eq(psr.min(), sr.min()) 
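# Illustrative sketch (not part of the original suite): string min/max
# reduce by lexicographic, code-point order, so the empty string sorts
# before everything and uppercase letters sort before lowercase ones.
def test_str_min_max_order_sketch():
    sr = cudf.Series(["b", "A", ""])
    assert sr.min() == ""
    assert sr.max() == "b"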
@pytest.mark.parametrize( "data", [ ["a", "b", "c", "d", "e"], ["a", "z", ".", '"', "aa", "zz"], ["aa", "zz"], ["z", "a", "zz", "aa"], ["1", "2", "3", "4", "5"], [""], ["a"], ["hello"], ["small text", "this is a larger text......"], ["👋🏻", "🔥", "🥇"], ["This is 💯", "here is a calendar", "📅"], ["", ".", ";", "[", "]"], ["\t", ".", "\n", "\n\t", "\t\n"], ], ) def test_str_max(data): psr = pd.Series(data) sr = cudf.Series(data) assert_eq(psr.max(), sr.max()) @pytest.mark.parametrize( "data", [ ["a", "b", "c", "d", "e"], ["a", "z", ".", '"', "aa", "zz"], ["aa", "zz"], ["z", "a", "zz", "aa"], ["1", "2", "3", "4", "5"], [""], ["a"], ["hello"], ["small text", "this is a larger text......"], ["👋🏻", "🔥", "🥇"], ["This is 💯", "here is a calendar", "📅"], ["", ".", ";", "[", "]"], ["\t", ".", "\n", "\n\t", "\t\n"], ], ) def test_str_sum(data): psr = pd.Series(data) sr = cudf.Series(data) assert_eq(psr.sum(), sr.sum()) def test_str_mean(): sr = cudf.Series(["a", "b", "c", "d", "e"]) with pytest.raises(TypeError): sr.mean() def test_string_product(): psr = pd.Series(["1", "2", "3", "4", "5"]) sr = cudf.Series(["1", "2", "3", "4", "5"]) assert_exceptions_equal( lfunc=psr.product, rfunc=sr.product, ) def test_string_var(): psr = pd.Series(["1", "2", "3", "4", "5"]) sr = cudf.Series(["1", "2", "3", "4", "5"]) assert_exceptions_equal(lfunc=psr.var, rfunc=sr.var) def test_string_std(): psr = pd.Series(["1", "2", "3", "4", "5"]) sr = cudf.Series(["1", "2", "3", "4", "5"]) assert_exceptions_equal(lfunc=psr.std, rfunc=sr.std) def test_string_slice_with_mask(): actual = cudf.Series(["hi", "hello", None]) expected = actual[0:3] assert actual._column.base_size == 3 assert_eq(actual._column.base_size, expected._column.base_size) assert_eq(actual._column.null_count, expected._column.null_count) assert_eq(actual, expected) @pytest.mark.parametrize( "data", [ [ """ { "store":{ "book":[ { "category":"reference", "author":"Nigel Rees", "title":"Sayings of the Century", "price":8.95 }, { "category":"fiction", "author":"Evelyn Waugh", "title":"Sword of Honour", "price":12.99 } ] } } """ ], [ """ { "store":{ "book":[ { "category":"reference", "author":"Nigel Rees", "title":"Sayings of the Century", "price":8.95 } ] } } """, """ { "store":{ "book":[ { "category":"fiction", "author":"Evelyn Waugh", "title":"Sword of Honour", "price":12.99 } ] } } """, ], ], ) def test_string_get_json_object_n(data): gs = cudf.Series(data) ps = pd.Series(data) assert_eq( json.loads(gs.str.get_json_object("$.store")[0]), ps.apply(lambda x: json.loads(x)["store"])[0], ) assert_eq( json.loads(gs.str.get_json_object("$.store.book")[0]), ps.apply(lambda x: json.loads(x)["store"]["book"])[0], ) assert_eq( gs.str.get_json_object("$.store.book[0].category"), ps.apply(lambda x: json.loads(x)["store"]["book"][0]["category"]), ) @pytest.mark.parametrize( "json_path", ["$.store", "$.store.book", "$.store.book[*].category", " "] ) def test_string_get_json_object_empty_json_strings(json_path): gs = cudf.Series( [ """ { "":{ "":[ { "":"", "":"", "":"" }, { "":"fiction", "":"", "title":"" } ] } } """ ] ) got = gs.str.get_json_object(json_path) expect = cudf.Series([None], dtype="object") assert_eq(got, expect) @pytest.mark.parametrize("json_path", ["a", ".", "/.store"]) def test_string_get_json_object_invalid_JSONPath(json_path): gs = cudf.Series( [ """ { "store":{ "book":[ { "category":"reference", "author":"Nigel Rees", "title":"Sayings of the Century", "price":8.95 }, { "category":"fiction", "author":"Evelyn Waugh", "title":"Sword of Honour", "price":12.99 
} ] } } """ ] ) with pytest.raises(ValueError): gs.str.get_json_object(json_path) def test_string_get_json_object_allow_single_quotes(): gs = cudf.Series( [ """ { "store":{ "book":[ { 'author':"Nigel Rees", "title":'Sayings of the Century', "price":8.95 }, { "category":"fiction", "author":"Evelyn Waugh", 'title':"Sword of Honour", "price":12.99 } ] } } """ ] ) assert_eq( gs.str.get_json_object( "$.store.book[0].author", allow_single_quotes=True ), cudf.Series(["Nigel Rees"]), ) assert_eq( gs.str.get_json_object( "$.store.book[*].title", allow_single_quotes=True ), cudf.Series(["['Sayings of the Century',\"Sword of Honour\"]"]), ) assert_eq( gs.str.get_json_object( "$.store.book[0].author", allow_single_quotes=False ), cudf.Series([None]), ) assert_eq( gs.str.get_json_object( "$.store.book[*].title", allow_single_quotes=False ), cudf.Series([None]), ) def test_string_get_json_object_strip_quotes_from_single_strings(): gs = cudf.Series( [ """ { "store":{ "book":[ { "author":"Nigel Rees", "title":"Sayings of the Century", "price":8.95 }, { "category":"fiction", "author":"Evelyn Waugh", "title":"Sword of Honour", "price":12.99 } ] } } """ ] ) assert_eq( gs.str.get_json_object( "$.store.book[0].author", strip_quotes_from_single_strings=True ), cudf.Series(["Nigel Rees"]), ) assert_eq( gs.str.get_json_object( "$.store.book[*].title", strip_quotes_from_single_strings=True ), cudf.Series(['["Sayings of the Century","Sword of Honour"]']), ) assert_eq( gs.str.get_json_object( "$.store.book[0].author", strip_quotes_from_single_strings=False ), cudf.Series(['"Nigel Rees"']), ) assert_eq( gs.str.get_json_object( "$.store.book[*].title", strip_quotes_from_single_strings=False ), cudf.Series(['["Sayings of the Century","Sword of Honour"]']), ) def test_string_get_json_object_missing_fields_as_nulls(): gs = cudf.Series( [ """ { "store":{ "book":[ { "author":"Nigel Rees", "title":"Sayings of the Century", "price":8.95 }, { "category":"fiction", "author":"Evelyn Waugh", "title":"Sword of Honour", "price":12.99 } ] } } """ ] ) assert_eq( gs.str.get_json_object( "$.store.book[0].category", missing_fields_as_nulls=True ), cudf.Series(["null"]), ) assert_eq( gs.str.get_json_object( "$.store.book[*].category", missing_fields_as_nulls=True ), cudf.Series(['[null,"fiction"]']), ) assert_eq( gs.str.get_json_object( "$.store.book[0].category", missing_fields_as_nulls=False ), cudf.Series([None]), ) assert_eq( gs.str.get_json_object( "$.store.book[*].category", missing_fields_as_nulls=False ), cudf.Series(['["fiction"]']), ) def test_str_join_lists_error(): sr = cudf.Series([["a", "a"], ["b"], ["c"]]) with pytest.raises( ValueError, match="sep_na_rep cannot be defined when `sep` is scalar." 
): sr.str.join(sep="-", sep_na_rep="-") with pytest.raises( TypeError, match=re.escape( "string_na_rep should be a string scalar, got [10, 20] of type " ": <class 'list'>" ), ): sr.str.join(string_na_rep=[10, 20]) with pytest.raises( ValueError, match=re.escape( "sep should be of similar size to the series, got: 2, expected: 3" ), ): sr.str.join(sep=["=", "-"]) with pytest.raises( TypeError, match=re.escape( "sep_na_rep should be a string scalar, got " "['na'] of type: <class 'list'>" ), ): sr.str.join(sep=["-", "+", "."], sep_na_rep=["na"]) with pytest.raises( TypeError, match=re.escape( "sep should be an str, array-like or Series object, " "found <class 'cudf.core.dataframe.DataFrame'>" ), ): sr.str.join(sep=cudf.DataFrame()) @pytest.mark.parametrize( "sr,sep,string_na_rep,sep_na_rep,expected", [ ( cudf.Series([["a", "a"], ["b"], ["c"]]), "-", None, None, cudf.Series(["a-a", "b", "c"]), ), ( cudf.Series([["a", "b"], [None], [None, "hello", None, "world"]]), "__", "=", None, cudf.Series(["a__b", None, "=__hello__=__world"]), ), ( cudf.Series( [ ["a", None, "b"], [None], [None, "hello", None, "world"], None, ] ), ["-", "_", "**", "!"], None, None, cudf.Series(["a--b", None, "**hello****world", None]), ), ( cudf.Series( [ ["a", None, "b"], [None], [None, "hello", None, "world"], None, ] ), ["-", "_", "**", None], "rep_str", "sep_str", cudf.Series( ["a-rep_str-b", None, "rep_str**hello**rep_str**world", None] ), ), ( cudf.Series([[None, "a"], [None], None]), ["-", "_", None], "rep_str", None, cudf.Series(["rep_str-a", None, None]), ), ( cudf.Series([[None, "a"], [None], None]), ["-", "_", None], None, "sep_str", cudf.Series(["-a", None, None]), ), ], ) def test_str_join_lists(sr, sep, string_na_rep, sep_na_rep, expected): actual = sr.str.join( sep=sep, string_na_rep=string_na_rep, sep_na_rep=sep_na_rep ) assert_eq(actual, expected) @pytest.mark.parametrize( "patterns, expected", [ ( lambda: ["a", "s", "g", "i", "o", "r"], [ [-1, 0, 5, 3, -1, 2], [-1, -1, -1, -1, 1, -1], [2, 0, -1, -1, -1, 3], [-1, -1, -1, 0, -1, -1], ], ), ( lambda: cudf.Series(["a", "string", "g", "inn", "o", "r", "sea"]), [ [-1, 0, 5, -1, -1, 2, -1], [-1, -1, -1, -1, 1, -1, -1], [2, -1, -1, -1, -1, 3, 0], [-1, -1, -1, -1, -1, -1, -1], ], ), ], ) def test_str_find_multiple(patterns, expected): s = cudf.Series(["strings", "to", "search", "in"]) t = patterns() expected = cudf.Series(expected) # We convert to pandas because find_multiple returns ListDtype(int32) # and expected is ListDtype(int64). # Currently there is no easy way to type-cast these to match. assert_eq(s.str.find_multiple(t).to_pandas(), expected.to_pandas()) s = cudf.Index(s) t = cudf.Index(t) expected.index = s assert_eq(s.str.find_multiple(t).to_pandas(), expected.to_pandas()) def test_str_find_multiple_error(): s = cudf.Series(["strings", "to", "search", "in"]) with pytest.raises( TypeError, match=re.escape( "patterns should be an array-like or a Series object, found " "<class 'str'>" ), ): s.str.find_multiple("a") t = cudf.Series([1, 2, 3]) with pytest.raises( TypeError, match=re.escape("patterns can only be of 'string' dtype, got: int64"), ): s.str.find_multiple(t) def test_str_iterate_error(): s = cudf.Series(["abc", "xyz"]) with pytest.raises(TypeError): iter(s.str)
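# Illustrative sketch (not part of the original suite): find_multiple
# returns one list per row holding the first match offset of every
# pattern, with -1 marking "not found", as the expectations above encode.
def test_str_find_multiple_sketch():
    s = cudf.Series(["strings", "to"])
    got = s.str.find_multiple(cudf.Series(["s", "o"]))
    assert got.to_pandas().tolist() == [[0, -1], [-1, 1]]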
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_onehot.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

from string import ascii_lowercase

import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.testing._utils import assert_eq

pytestmark = pytest.mark.spilling


@pytest.mark.parametrize(
    "data, index",
    [
        (np.arange(10), None),
        (["abc", "zyx", "pppp"], None),
        ([], None),
        (pd.Series(["cudf", "hello", "pandas"] * 10, dtype="category"), None),
        (range(10), [1, 2, 3, 4, 5] * 2),
    ],
)
def test_get_dummies(data, index):
    gdf = cudf.DataFrame({"x": data}, index=index)
    pdf = pd.DataFrame({"x": data}, index=index)

    encoded_expected = pd.get_dummies(pdf, prefix="test")
    with pytest.warns(FutureWarning):
        encoded_actual = cudf.get_dummies(gdf, prefix="test")

    assert_eq(
        encoded_expected,
        encoded_actual,
        check_dtype=len(data) != 0,
    )
    encoded_actual = cudf.get_dummies(gdf, prefix="test", dtype=np.uint8)

    assert_eq(
        encoded_expected,
        encoded_actual,
        check_dtype=len(data) != 0,
    )


@pytest.mark.parametrize("n_cols", [5, 10, 20])
def test_onehot_get_dummies_multicol(n_cols):
    n_categories = 5
    data = dict(
        zip(ascii_lowercase, (np.arange(n_categories) for _ in range(n_cols)))
    )

    gdf = cudf.DataFrame(data)
    pdf = pd.DataFrame(data)

    encoded_expected = pd.get_dummies(pdf, prefix="test")
    with pytest.warns(FutureWarning):
        encoded_actual = cudf.get_dummies(gdf, prefix="test")

    assert_eq(encoded_expected, encoded_actual)


@pytest.mark.parametrize("nan_as_null", [True, False])
@pytest.mark.parametrize("dummy_na", [True, False])
def test_onehot_get_dummies_dummy_na(nan_as_null, dummy_na):
    pdf = pd.DataFrame({"a": [0, 1, np.nan]})
    df = cudf.DataFrame.from_pandas(pdf, nan_as_null=nan_as_null)

    expected = pd.get_dummies(pdf, dummy_na=dummy_na, columns=["a"])
    with pytest.warns(FutureWarning):
        actual = cudf.get_dummies(df, dummy_na=dummy_na, columns=["a"])

    if dummy_na and nan_as_null:
        actual = actual.rename(columns={"a_<NA>": "a_nan"})[expected.columns]

    assert_eq(expected, actual)


@pytest.mark.parametrize(
    "prefix",
    [
        ["a", "b", "c"],
        "",
        None,
        {"first": "one", "second": "two", "third": "three"},
        "--",
    ],
)
@pytest.mark.parametrize(
    "prefix_sep",
    [
        ["a", "b", "c"],
        "",
        "++",
        {"first": "*******", "second": "__________", "third": "#########"},
    ],
)
def test_get_dummies_prefix_sep(prefix, prefix_sep):
    data = {
        "first": ["1", "2", "3"],
        "second": ["abc", "def", "ghi"],
        "third": ["ji", "ji", "ji"],
    }

    gdf = cudf.DataFrame(data)
    pdf = pd.DataFrame(data)

    encoded_expected = pd.get_dummies(
        pdf, prefix=prefix, prefix_sep=prefix_sep
    )
    with pytest.warns(FutureWarning):
        encoded_actual = cudf.get_dummies(
            gdf, prefix=prefix, prefix_sep=prefix_sep
        )

    assert_eq(encoded_expected, encoded_actual)


def test_get_dummies_with_nan():
    df = cudf.DataFrame(
        {"a": cudf.Series([1, 2, np.nan, None], nan_as_null=False)}
    )
    expected = pd.get_dummies(
        df.to_pandas(nullable=True), dummy_na=True, columns=["a"]
    )
    with pytest.warns(FutureWarning):
        actual = cudf.get_dummies(df, dummy_na=True, columns=["a"])

    assert_eq(expected, actual)


@pytest.mark.parametrize(
    "data",
    [
        lambda: cudf.Series(["abc", "l", "a", "abc", "z", "xyz"]),
        lambda: cudf.Index([None, 1, 2, 3.3, None, 0.2]),
        lambda: cudf.Series([0.1, 2, 3, None, np.nan]),
        lambda: cudf.Series([23678, 324, 1, 324], name="abc"),
    ],
)
@pytest.mark.parametrize("prefix_sep", ["-", "#"])
@pytest.mark.parametrize("prefix", [None, "hi"])
@pytest.mark.parametrize("dtype", ["uint8", "int16"])
def test_get_dummies_array_like(data, prefix_sep, prefix, dtype):
    data = data()
    pd_data = data.to_pandas()

    expected = pd.get_dummies(
        pd_data, prefix=prefix, prefix_sep=prefix_sep, dtype=dtype
    )
    actual = cudf.get_dummies(
        data, prefix=prefix, prefix_sep=prefix_sep, dtype=dtype
    )
    assert_eq(expected, actual)


def test_get_dummies_array_like_with_nan():
    ser = cudf.Series([0.1, 2, 3, None, np.nan], nan_as_null=False)

    expected = pd.get_dummies(
        ser.to_pandas(nullable=True), dummy_na=True, prefix="a", prefix_sep="_"
    )
    with pytest.warns(FutureWarning):
        actual = cudf.get_dummies(
            ser, dummy_na=True, prefix="a", prefix_sep="_"
        )

    assert_eq(expected, actual)
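# Illustrative sketch (not part of the original file): each category value
# becomes one indicator column named <prefix><prefix_sep><category>; passing
# an explicit dtype sidesteps the FutureWarning asserted above.
def test_get_dummies_column_names_sketch():
    gdf = cudf.DataFrame({"x": ["a", "b", "a"]})
    encoded = cudf.get_dummies(gdf, prefix="test", dtype="uint8")
    assert list(encoded.columns) == ["test_a", "test_b"]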
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_query.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION.


import datetime
import inspect
from itertools import product

import numpy as np
import pandas as pd
import pytest

import cudf
from cudf import DataFrame
from cudf.testing._utils import assert_eq
from cudf.utils import queryutils

_params_query_parser = []
_params_query_parser.append(("a > @b", ("a", "__CUDF_ENVREF__b")))
_params_query_parser.append(("(a + b) <= @c", ("a", "b", "__CUDF_ENVREF__c")))
_params_query_parser.append(("a > b if a > 0 else b > a", ("a", "b")))


@pytest.mark.parametrize("text,expect_args", _params_query_parser)
def test_query_parser(text, expect_args):
    info = queryutils.query_parser(text)
    fn = queryutils.query_builder(info, "myfoo")
    assert callable(fn)
    argspec = inspect.getfullargspec(fn)
    assert tuple(argspec.args) == tuple(expect_args)


params_query_data = list(product([1, 2, 7, 8, 9, 16, 100, 129], range(2)))
params_query_fn = [
    (lambda a, b: a < b, "a < b"),
    (lambda a, b: a * 2 >= b, "a * 2 >= b"),
    (lambda a, b: 2 * (a + b) > (a + b) / 2, "2 * (a + b) > (a + b) / 2"),
]
nulls = [True, False]


@pytest.mark.parametrize(
    "data,fn,nulls", product(params_query_data, params_query_fn, nulls)
)
def test_query(data, fn, nulls):
    # prepare
    nelem, seed = data
    expect_fn, query_expr = fn
    np.random.seed(seed)
    pdf = pd.DataFrame()
    pdf["a"] = np.arange(nelem)
    pdf["b"] = np.random.random(nelem) * nelem
    if nulls:
        pdf.loc[::2, "a"] = None
    gdf = cudf.from_pandas(pdf)
    assert_eq(pdf.query(query_expr), gdf.query(query_expr))


params_query_env_fn = [
    (lambda a, b, c, d: a * c > b + d, "a * @c > b + @d"),
    (
        lambda a, b, c, d: ((a / c) < d) | ((b**c) > d),
        "((a / @c) < @d) | ((b ** @c) > @d)",
    ),
]


@pytest.mark.parametrize(
    "data,fn", product(params_query_data, params_query_env_fn)
)
def test_query_ref_env(data, fn):
    # prepare
    nelem, seed = data
    expect_fn, query_expr = fn
    np.random.seed(seed)
    df = DataFrame()
    df["a"] = aa = np.arange(nelem)
    df["b"] = bb = np.random.random(nelem) * nelem
    c = 2.3
    d = 1.2
    # compute the expected boolean mask on the host
    expect_mask = expect_fn(aa, bb, c, d)
    df2 = df.query(query_expr)
    # check
    assert len(df2) == np.count_nonzero(expect_mask)
    np.testing.assert_array_almost_equal(df2["a"].to_numpy(), aa[expect_mask])
    np.testing.assert_array_almost_equal(df2["b"].to_numpy(), bb[expect_mask])


def test_query_env_changing():
    df = DataFrame()
    df["a"] = aa = np.arange(100)
    expr = "a < @c"
    # first attempt
    c = 10
    got = df.query(expr)
    np.testing.assert_array_equal(aa[aa < c], got["a"].to_numpy())
    # change env
    c = 50
    got = df.query(expr)
    np.testing.assert_array_equal(aa[aa < c], got["a"].to_numpy())


def test_query_local_dict():
    df = DataFrame()
    df["a"] = aa = np.arange(100)
    expr = "a < @val"
    got = df.query(expr, local_dict={"val": 10})
    np.testing.assert_array_equal(aa[aa < 10], got["a"].to_numpy())

    # test for datetime
    df = DataFrame()
    data = np.array(["2018-10-07", "2018-10-08"], dtype="datetime64")
    df["datetimes"] = data
    search_date = datetime.datetime.strptime("2018-10-08", "%Y-%m-%d")
    expr = "datetimes==@search_date"
    got = df.query(expr, local_dict={"search_date": search_date})
    np.testing.assert_array_equal(data[1], got["datetimes"].to_numpy())


def test_query_split_combine():
    np.random.seed(0)
    df = pd.DataFrame(
        {"x": np.random.randint(0, 5, size=10), "y": np.random.normal(size=10)}
    )
    gdf = DataFrame.from_pandas(df)

    # Split the GDF
    s1 = gdf[:5]
    s2 = gdf[5:]

    # Do the query
    expr = "x > 2"
    q1 = s1.query(expr)
    q2 = s2.query(expr)

    # Combine
    got = cudf.concat([q1, q2]).to_pandas()

    # Should equal just querying the original GDF
    expect = gdf.query(expr).to_pandas()
    assert_eq(got, expect, check_index_type=True)


def test_query_empty_frames():
    empty_pdf = pd.DataFrame({"a": [], "b": []})
    empty_gdf = DataFrame.from_pandas(empty_pdf)
    # Do the query
    expr = "a > 2"
    got = empty_gdf.query(expr).to_pandas()
    expect = empty_pdf.query(expr)
    # assert equal results
    assert_eq(got, expect)


@pytest.mark.parametrize(("a_val", "b_val", "c_val"), [(4, 3, 15)])
@pytest.mark.parametrize("index", ["a", ["a", "b"]])
@pytest.mark.parametrize(
    "query",
    [
        "a < @a_val",
        "a < @a_val and b > @b_val",
        "(a < @a_val and b >@b_val) or c >@c_val",
    ],
)
def test_query_with_index_name(index, query, a_val, b_val, c_val):
    pdf = pd.DataFrame(
        {
            "a": [1, None, 3, 4, 5],
            "b": [5, 4, 3, 2, 1],
            "c": [12, 15, 17, 19, 27],
        }
    )
    # set_index returns a new frame, so the result must be assigned back
    pdf = pdf.set_index(index)
    gdf = DataFrame.from_pandas(pdf)

    out = gdf.query(query)
    expect = pdf.query(query)

    assert_eq(out, expect)


@pytest.mark.parametrize(("a_val", "b_val", "c_val"), [(4, 3, 15)])
@pytest.mark.parametrize(
    "query",
    [
        "index < @a_val",
        "index < @a_val and b > @b_val",
        "(index < @a_val and b >@b_val) or c >@c_val",
    ],
)
def test_query_with_index_keyword(query, a_val, b_val, c_val):
    pdf = pd.DataFrame(
        {
            "a": [1, None, 3, 4, 5],
            "b": [5, 4, 3, 2, 1],
            "c": [12, 15, 17, 19, 27],
        }
    )
    # set_index returns a new frame, so the result must be assigned back
    pdf = pdf.set_index("a")
    gdf = DataFrame.from_pandas(pdf)

    out = gdf.query(query)
    expect = pdf.query(query)

    assert_eq(out, expect)


@pytest.mark.parametrize(
    "data, query",
    [
        # Only need to test the dtypes that pandas
        # supports but that we do not
        (["a", "b", "c"], "data == 'a'"),
    ],
)
def test_query_unsupported_dtypes(data, query):
    gdf = cudf.DataFrame({"data": data})

    # make sure the query works in pandas
    pdf = gdf.to_pandas()
    pdf_result = pdf.query(query)

    expect = pd.DataFrame({"data": ["a"]})
    assert_eq(expect, pdf_result)

    # but fails in cuDF
    with pytest.raises(TypeError):
        gdf.query(query)
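# Illustrative sketch (not from the original file): `@name` references are
# resolved against the calling environment each time query() runs, the
# mechanism test_query_ref_env and test_query_env_changing exercise above.
def test_query_env_ref_sketch():
    gdf = DataFrame({"a": [1, 2, 3, 4]})
    threshold = 2  # picked up through the @threshold reference below
    got = gdf.query("a > @threshold")
    np.testing.assert_array_equal(got["a"].to_numpy(), np.array([3, 4]))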
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_query_mask.py
# Copyright (c) 2019-2022, NVIDIA CORPORATION.

import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.testing._utils import assert_eq

_data = [
    {"a": [0, 1.0, 2.0, None, np.nan, None, 3, 5]},
    {"a": [0, 1.0, 2.0, None, 3, np.nan, None, 4]},
    {"a": [0, 1.0, 2.0, None, 3, np.nan, None, 4, None, 9]},
]
_queries = [
    "a == 3",
    # "a != 3",  # incompatible with pandas
    "a < 3",
    "a <= 3",
    "a > 3",
    "a >= 3",
]


@pytest.mark.parametrize("data", _data)
@pytest.mark.parametrize("query", _queries)
def test_mask_0(data, query):
    pdf = pd.DataFrame(data)
    gdf = cudf.from_pandas(pdf)

    pdf_q_res = pdf.query(query)
    gdf_q_res = gdf.query(query)

    assert_eq(pdf_q_res, gdf_q_res)


@pytest.mark.parametrize("data", _data)
@pytest.mark.parametrize("nan_as_null", [False, True])
@pytest.mark.parametrize("query", _queries)
def test_mask_1(data, nan_as_null, query):
    pdf = pd.DataFrame(data)
    gdf = cudf.DataFrame.from_pandas(pdf, nan_as_null=nan_as_null)

    pdf_q_res = pdf.query(query)
    gdf_q_res = gdf.query(query)

    assert_eq(pdf_q_res, gdf_q_res)


@pytest.mark.parametrize("data", _data)
@pytest.mark.parametrize("query", _queries)
def test_mask_2(data, query):
    pdf = pd.DataFrame(data)
    gdf = cudf.DataFrame(data)

    pdf_q_res = pdf.query(query)
    gdf_q_res = gdf.query(query)

    assert_eq(pdf_q_res, gdf_q_res)


@pytest.mark.parametrize("data", _data)
@pytest.mark.parametrize("query", _queries)
def test_dataframe_initializer(data, query):
    pdf = pd.DataFrame(data)
    gdf = cudf.DataFrame(data)

    pdf_q_res = pdf.query(query)
    gdf_q_res = gdf.query(query)

    assert_eq(pdf_q_res, gdf_q_res)
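# Illustrative sketch (not part of the original file): a row whose value is
# null can never satisfy a comparison, so query() drops it on the cudf side
# just as pandas drops NaN rows, which is what the masked tests above check.
def test_query_null_row_dropped_sketch():
    gdf = cudf.DataFrame({"a": [1.0, None, 3.0]})
    got = gdf.query("a >= 1")
    assert got["a"].to_numpy().tolist() == [1.0, 3.0]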
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_join_order.py
# Copyright (c) 2023, NVIDIA CORPORATION.

import itertools
import operator
import string
from collections import defaultdict

import numpy as np
import pytest

import cudf
from cudf.core._compat import PANDAS_GE_220
from cudf.testing._utils import assert_eq


@pytest.fixture(params=[False, True], ids=["unsorted", "sorted"])
def sort(request):
    return request.param


@pytest.fixture
def left():
    left_key = [1, 3, 2, 1, 1, 2, 5, 1, 4, 5, 8, 12, 12312, 1] * 100
    left_val = list(range(len(left_key)))
    return cudf.DataFrame({"key": left_key, "val": left_val})


@pytest.fixture
def right():
    right_key = [12312, 12312, 3, 2, 1, 1, 5, 7, 2] * 200
    right_val = list(
        itertools.islice(itertools.cycle(string.ascii_letters), len(right_key))
    )
    return cudf.DataFrame({"key": right_key, "val": right_val})


if PANDAS_GE_220:
    # Behaviour in sort=False case didn't match documentation in many
    # cases prior to https://github.com/pandas-dev/pandas/pull/54611
    # (released as part of pandas 2.2)
    def expected(left, right, sort, *, how):
        left = left.to_pandas()
        right = right.to_pandas()
        return left.merge(right, on="key", how=how, sort=sort)

else:

    def expect_inner(left, right, sort):
        left_key = left.key.values_host.tolist()
        left_val = left.val.values_host.tolist()
        right_key = right.key.values_host.tolist()
        right_val = right.val.values_host.tolist()
        right_have = defaultdict(list)
        for i, k in enumerate(right_key):
            right_have[k].append(i)
        keys = []
        val_x = []
        val_y = []
        for k, v in zip(left_key, left_val):
            if k not in right_have:
                continue
            for i in right_have[k]:
                keys.append(k)
                val_x.append(v)
                val_y.append(right_val[i])

        if sort:
            # Python sort is stable, so this will preserve input order for
            # equal items.
            keys, val_x, val_y = zip(
                *sorted(zip(keys, val_x, val_y), key=operator.itemgetter(0))
            )
        return cudf.DataFrame({"key": keys, "val_x": val_x, "val_y": val_y})

    def expect_left(left, right, sort):
        left_key = left.key.values_host.tolist()
        left_val = left.val.values_host.tolist()
        right_key = right.key.values_host.tolist()
        right_val = right.val.values_host.tolist()
        right_have = defaultdict(list)
        for i, k in enumerate(right_key):
            right_have[k].append(i)
        keys = []
        val_x = []
        val_y = []
        for k, v in zip(left_key, left_val):
            if k not in right_have:
                right_vals = [None]
            else:
                right_vals = [right_val[i] for i in right_have[k]]
            for rv in right_vals:
                keys.append(k)
                val_x.append(v)
                val_y.append(rv)

        if sort:
            # Python sort is stable, so this will preserve input order for
            # equal items.
            keys, val_x, val_y = zip(
                *sorted(zip(keys, val_x, val_y), key=operator.itemgetter(0))
            )
        return cudf.DataFrame({"key": keys, "val_x": val_x, "val_y": val_y})

    def expect_outer(left, right, sort):
        left_key = left.key.values_host.tolist()
        left_val = left.val.values_host.tolist()
        right_key = right.key.values_host.tolist()
        right_val = right.val.values_host.tolist()
        right_have = defaultdict(list)
        for i, k in enumerate(right_key):
            right_have[k].append(i)
        keys = []
        val_x = []
        val_y = []
        for k, v in zip(left_key, left_val):
            if k not in right_have:
                right_vals = [None]
            else:
                right_vals = [right_val[i] for i in right_have[k]]
            for rv in right_vals:
                keys.append(k)
                val_x.append(v)
                val_y.append(rv)
        left_have = set(left_key)
        for k, v in zip(right_key, right_val):
            if k not in left_have:
                keys.append(k)
                val_x.append(None)
                val_y.append(v)

        # Python sort is stable, so this will preserve input order for
        # equal items.
        # outer joins are always sorted, but we test both sort values
        keys, val_x, val_y = zip(
            *sorted(zip(keys, val_x, val_y), key=operator.itemgetter(0))
        )
        return cudf.DataFrame({"key": keys, "val_x": val_x, "val_y": val_y})

    def expected(left, right, sort, *, how):
        if how == "inner":
            return expect_inner(left, right, sort)
        elif how == "outer":
            return expect_outer(left, right, sort)
        elif how == "left":
            return expect_left(left, right, sort)
        elif how == "right":
            return expect_left(right, left, sort).rename(
                {"val_x": "val_y", "val_y": "val_x"}, axis=1
            )
        else:
            raise NotImplementedError()


@pytest.mark.parametrize("how", ["inner", "left", "right", "outer"])
def test_join_ordering_pandas_compat(left, right, sort, how):
    with cudf.option_context("mode.pandas_compatible", True):
        actual = left.merge(right, on="key", how=how, sort=sort)
    expect = expected(left, right, sort, how=how)
    assert_eq(expect, actual)


@pytest.mark.parametrize("how", ["left", "right", "inner", "outer"])
@pytest.mark.parametrize("sort", [True, False])
@pytest.mark.parametrize("on_index", [True, False])
@pytest.mark.parametrize("left_unique", [True, False])
@pytest.mark.parametrize("left_monotonic", [True, False])
@pytest.mark.parametrize("right_unique", [True, False])
@pytest.mark.parametrize("right_monotonic", [True, False])
def test_merge_combinations(
    request,
    how,
    sort,
    on_index,
    left_unique,
    left_monotonic,
    right_unique,
    right_monotonic,
):
    request.applymarker(
        pytest.mark.xfail(
            condition=how == "outer"
            and on_index
            and left_unique
            and not left_monotonic
            and right_unique
            and not right_monotonic,
            reason="https://github.com/pandas-dev/pandas/issues/55992",
        )
    )
    left = [2, 3]
    if left_unique:
        left.append(4 if left_monotonic else 1)
    else:
        left.append(3 if left_monotonic else 2)

    right = [2, 3]
    if right_unique:
        right.append(4 if right_monotonic else 1)
    else:
        right.append(3 if right_monotonic else 2)

    left = cudf.DataFrame({"key": left})
    right = cudf.DataFrame({"key": right})

    if on_index:
        left = left.set_index("key")
        right = right.set_index("key")
        on_kwargs = {"left_index": True, "right_index": True}
    else:
        on_kwargs = {"on": "key"}

    with cudf.option_context("mode.pandas_compatible", True):
        result = cudf.merge(left, right, how=how, sort=sort, **on_kwargs)
    if on_index:
        left = left.reset_index()
        right = right.reset_index()

    if how in ["left", "right", "inner"]:
        if how in ["left", "inner"]:
            expected, other, other_unique = left, right, right_unique
        else:
            expected, other, other_unique = right, left, left_unique
        if how == "inner":
            keep_values = set(left["key"].values_host).intersection(
                right["key"].values_host
            )
            keep_mask = expected["key"].isin(keep_values)
            expected = expected[keep_mask]
        if sort:
            expected = expected.sort_values("key")
        if not other_unique:
            other_value_counts = other["key"].value_counts()
            repeats = other_value_counts.reindex(
                expected["key"].values, fill_value=1
            )
            repeats = repeats.astype(np.intp)
            expected = expected["key"].repeat(repeats.values)
            expected = expected.to_frame()
    elif how == "outer":
        if on_index and left_unique and left["key"].equals(right["key"]):
            expected = cudf.DataFrame({"key": left["key"]})
        else:
            left_counts = left["key"].value_counts()
            right_counts = right["key"].value_counts()
            expected_counts = left_counts.mul(right_counts, fill_value=1)
            expected_counts = expected_counts.astype(np.intp)
            expected = expected_counts.index.values_host.repeat(
                expected_counts.values_host
            )
            expected = cudf.DataFrame({"key": expected})
            expected = expected.sort_values("key")

    if on_index:
        expected = expected.set_index("key")
    else:
        expected = expected.reset_index(drop=True)

    assert_eq(result, expected)
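# Illustrative sketch (not part of the original test file): under
# "mode.pandas_compatible", cudf.merge matches pandas' row ordering,
# which is the property the reference implementations above encode.
# Assumes a CUDA-capable environment with cudf installed.
import cudf

demo_left = cudf.DataFrame({"key": [2, 1, 2], "val": [0, 1, 2]})
demo_right = cudf.DataFrame({"key": [2, 2, 1], "other": ["a", "b", "c"]})
with cudf.option_context("mode.pandas_compatible", True):
    # With sort=False the left frame's row order is preserved, as in pandas.
    print(demo_left.merge(demo_right, on="key", how="left", sort=False))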
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_dataframe_copy.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

from copy import copy, deepcopy

import cupy as cp
import numpy as np
import pandas as pd
import pytest

from cudf.core.dataframe import DataFrame
from cudf.testing._utils import ALL_TYPES, assert_eq, assert_neq

"""
DataFrame copy expectations
* A shallow copy constructs a new compound object and then (to the
  extent possible) inserts references into it to the objects found
  in the original.
* A deep copy constructs a new compound object and then, recursively,
  inserts copies into it of the objects found in the original.

A cuDF DataFrame is a compound object containing a few members:
_index, _size, _cols, where _cols is an OrderedDict
"""


@pytest.mark.parametrize(
    "copy_parameters",
    [
        {"fn": lambda x: x.copy(), "expected_equality": False},
        {"fn": lambda x: x.copy(deep=True), "expected_equality": False},
        {"fn": lambda x: copy(x), "expected_equality": False},
        {"fn": lambda x: deepcopy(x), "expected_equality": False},
    ],
)
def test_dataframe_deep_copy(copy_parameters):
    pdf = pd.DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]
    )
    gdf = DataFrame.from_pandas(pdf)
    copy_pdf = copy_parameters["fn"](pdf)
    copy_gdf = copy_parameters["fn"](gdf)
    copy_pdf["b"] = [0, 0, 0]
    copy_gdf["b"] = [0, 0, 0]
    pdf_is_equal = np.array_equal(pdf["b"].values, copy_pdf["b"].values)
    gdf_is_equal = np.array_equal(
        gdf["b"].to_numpy(), copy_gdf["b"].to_numpy()
    )
    assert pdf_is_equal == copy_parameters["expected_equality"]
    assert gdf_is_equal == copy_parameters["expected_equality"]


@pytest.mark.parametrize(
    "copy_parameters",
    [
        {"fn": lambda x: x.copy(), "expected_equality": False},
        {"fn": lambda x: x.copy(deep=True), "expected_equality": False},
        {"fn": lambda x: copy(x), "expected_equality": False},
        {"fn": lambda x: deepcopy(x), "expected_equality": False},
    ],
)
def test_dataframe_deep_copy_and_insert(copy_parameters):
    pdf = pd.DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]
    )
    gdf = DataFrame.from_pandas(pdf)
    copy_pdf = copy_parameters["fn"](pdf)
    copy_gdf = copy_parameters["fn"](gdf)
    copy_pdf["b"] = [0, 0, 0]
    copy_gdf["b"] = [0, 0, 0]
    pdf_is_equal = np.array_equal(pdf["b"].values, copy_pdf["b"].values)
    gdf_is_equal = np.array_equal(
        gdf["b"].to_numpy(), copy_gdf["b"].to_numpy()
    )
    assert pdf_is_equal == copy_parameters["expected_equality"]
    assert gdf_is_equal == copy_parameters["expected_equality"]


"""
DataFrame copy bounds checking - sizes 0 through 10 perform as
expected_equality
"""


@pytest.mark.parametrize(
    "copy_fn",
    [
        lambda x: x.copy(),
        lambda x: x.copy(deep=True),
        lambda x: copy(x),
        lambda x: deepcopy(x),
        lambda x: x.copy(deep=False),
    ],
)
@pytest.mark.parametrize("ncols", [0, 1, 10])
@pytest.mark.parametrize("data_type", ALL_TYPES)
def test_cudf_dataframe_copy(copy_fn, ncols, data_type):
    pdf = pd.DataFrame()
    for i in range(ncols):
        pdf[chr(i + ord("a"))] = pd.Series(
            np.random.randint(0, 1000, 20)
        ).astype(data_type)
    df = DataFrame.from_pandas(pdf)
    copy_df = copy_fn(df)
    assert_eq(df, copy_df)


@pytest.mark.parametrize(
    "copy_fn",
    [
        lambda x: x.copy(),
        lambda x: x.copy(deep=True),
        lambda x: copy(x),
        lambda x: deepcopy(x),
        lambda x: x.copy(deep=False),
    ],
)
@pytest.mark.parametrize("ncols", [0, 1, 10])
@pytest.mark.parametrize("data_type", ALL_TYPES)
def test_cudf_dataframe_copy_then_insert(copy_fn, ncols, data_type):
    pdf = pd.DataFrame()
    for i in range(ncols):
        pdf[chr(i + ord("a"))] = pd.Series(
            np.random.randint(0, 1000, 20)
        ).astype(data_type)
    df = DataFrame.from_pandas(pdf)
    copy_df = copy_fn(df)
    copy_pdf = copy_fn(pdf)
    copy_df["aa"] = pd.Series(np.random.randint(0, 1000, 20)).astype(
        data_type
    )
    copy_pdf["aa"] = pd.Series(np.random.randint(0, 1000, 20)).astype(
        data_type
    )
    assert not copy_pdf.to_string().split() == pdf.to_string().split()
    assert not copy_df.to_string().split() == df.to_string().split()


def test_deep_copy_write_in_place():
    pdf = pd.DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]
    )
    gdf = DataFrame.from_pandas(pdf)
    cdf = gdf.copy(deep=True)
    sr = gdf["b"]

    # Write a value in-place on the deep copy.
    # This should only affect the copy and not the original.
    cp.asarray(sr._column)[1] = 42

    assert_neq(gdf, cdf)


def test_shallow_copy_write_in_place():
    pdf = pd.DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]
    )
    gdf = DataFrame.from_pandas(pdf)
    cdf = gdf.copy(deep=False)
    sr = gdf["a"]

    # Write a value in-place on the shallow copy.
    # This should change the copy and original.
    cp.asarray(sr._column)[1] = 42

    assert_eq(gdf, cdf)


@pytest.mark.xfail(reason="cudf column-wise shallow copy is immutable")
def test_dataframe_copy_shallow():
    pdf = pd.DataFrame(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]
    )
    gdf = DataFrame.from_pandas(pdf)
    copy_pdf = pdf.copy(deep=False)
    copy_gdf = gdf.copy(deep=False)
    copy_pdf["b"] = [0, 0, 0]
    copy_gdf["b"] = [0, 0, 0]
    assert_eq(pdf["b"], copy_pdf["b"])
    assert_eq(gdf["b"], copy_gdf["b"])
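# Illustrative sketch (not part of the original test file): deep vs.
# shallow copy semantics for a cudf DataFrame, as exercised above.
# Assumes a CUDA-capable environment with cudf installed and
# copy-on-write left at its default (off).
import cudf

orig = cudf.DataFrame({"a": [1, 2, 3]})
deep = orig.copy(deep=True)      # owns its own device data
shallow = orig.copy(deep=False)  # references the original's columns
orig["a"] = [7, 8, 9]
# Column reassignment replaces the column object, so neither copy sees it;
# the in-place device writes tested above are what distinguish the two.
print(deep["a"].to_pandas().tolist(), shallow["a"].to_pandas().tolist())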
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_buffer.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION.

import cupy as cp
import pytest

from cudf.core.buffer import Buffer, as_buffer

pytestmark = pytest.mark.spilling

arr_len = 10


@pytest.mark.parametrize(
    "data",
    [
        (cp.zeros(arr_len), True),
        (cp.zeros((1, arr_len)), True),
        (cp.zeros((1, arr_len, 1)), True),
        (cp.zeros((arr_len, arr_len)), True),
        (cp.zeros((arr_len, arr_len)).reshape(arr_len * arr_len), True),
        (cp.zeros((arr_len, arr_len))[:, 0], False),
    ],
)
def test_buffer_from_cuda_iface_contiguous(data):
    data, expect_success = data
    if expect_success:
        as_buffer(data.view("|u1"))
    else:
        with pytest.raises(ValueError):
            as_buffer(data.view("|u1"))


@pytest.mark.parametrize(
    "data",
    [
        cp.arange(arr_len),
        cp.arange(arr_len).reshape(1, arr_len),
        cp.arange(arr_len).reshape(1, arr_len, 1),
        cp.arange(arr_len**2).reshape(arr_len, arr_len),
    ],
)
@pytest.mark.parametrize("dtype", ["uint8", "int8", "float32", "int32"])
def test_buffer_from_cuda_iface_dtype(data, dtype):
    data = data.astype(dtype)
    buf = as_buffer(data)
    got = cp.array(buf).reshape(-1).view("uint8")
    expect = data.reshape(-1).view("uint8")
    assert (expect == got).all()


def test_buffer_creation_from_any():
    ary = cp.arange(arr_len)
    b = as_buffer(ary, exposed=True)
    assert isinstance(b, Buffer)
    assert ary.data.ptr == b.get_ptr(mode="read")
    assert ary.nbytes == b.size

    with pytest.raises(
        ValueError, match="size must be specified when `data` is an integer"
    ):
        as_buffer(ary.data.ptr)

    b = as_buffer(ary.data.ptr, size=ary.nbytes, owner=ary, exposed=True)
    assert isinstance(b, Buffer)
    assert ary.data.ptr == b.get_ptr(mode="read")
    assert ary.nbytes == b.size
    assert b.owner.owner is ary


@pytest.mark.parametrize(
    "size,expect", [(10, "10B"), (2**10 + 500, "1.49KiB"), (2**20, "1MiB")]
)
def test_buffer_repr(size, expect):
    ary = cp.arange(size, dtype="uint8")
    buf = as_buffer(ary)
    assert f"size={expect}" in repr(buf)


@pytest.mark.parametrize(
    "idx",
    [
        slice(0, 0),
        slice(0, 1),
        slice(-2, -1),
        slice(0, arr_len),
        slice(2, 3),
        slice(2, -1),
    ],
)
def test_buffer_slice(idx):
    ary = cp.arange(arr_len, dtype="uint8")
    buf = as_buffer(ary)
    expect = ary[idx]
    got = cp.array(buf[idx])
    assert (expect == got).all()


@pytest.mark.parametrize(
    "idx, err_type, err_msg",
    [
        (1, TypeError, "Argument 'key' has incorrect type"),
        (slice(3, 2), ValueError, "size cannot be negative"),
        (slice(1, 2, 2), ValueError, "slice must be C-contiguous"),
        (slice(1, 2, -1), ValueError, "slice must be C-contiguous"),
        (slice(3, 2, -1), ValueError, "slice must be C-contiguous"),
    ],
)
def test_buffer_slice_fail(idx, err_type, err_msg):
    ary = cp.arange(arr_len, dtype="uint8")
    buf = as_buffer(ary)

    with pytest.raises(err_type, match=err_msg):
        buf[idx]
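# Illustrative sketch (not part of the original test file): as_buffer
# accepts any C-contiguous object exposing __cuda_array_interface__,
# while non-contiguous views (e.g. a strided column slice, as in the
# contiguity test above) are rejected.  Assumes a CUDA-capable
# environment with cudf and cupy installed.
import cupy as cp
from cudf.core.buffer import as_buffer

buf = as_buffer(cp.arange(10, dtype="uint8"))  # contiguous: accepted
try:
    as_buffer(cp.zeros((10, 10))[:, 0].view("|u1"))  # strided: rejected
except ValueError as err:
    print(err)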
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_concat.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. from decimal import Decimal import numpy as np import pandas as pd import pytest import cudf as gd from cudf.api.types import is_categorical_dtype from cudf.core._compat import PANDAS_GE_150, PANDAS_LT_140 from cudf.core.dtypes import Decimal32Dtype, Decimal64Dtype, Decimal128Dtype from cudf.testing._utils import assert_eq, assert_exceptions_equal def make_frames(index=None, nulls="none"): df = pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": list("abcde") * 2, } ) df.z = df.z.astype("category") df2 = pd.DataFrame( { "x": range(10, 20), "y": list(map(float, range(10, 20))), "z": list("edcba") * 2, } ) df2.z = df2.z.astype("category") if nulls == "all": df.y = np.full_like(df.y, np.nan) df2.y = np.full_like(df2.y, np.nan) if nulls == "some": mask = np.arange(10) np.random.shuffle(mask) mask = mask[:5] df.loc[mask, "y"] = np.nan df2.loc[mask, "y"] = np.nan gdf = gd.DataFrame.from_pandas(df) gdf2 = gd.DataFrame.from_pandas(df2) if index: df = df.set_index(index) df2 = df2.set_index(index) gdf = gdf.set_index(index) gdf2 = gdf2.set_index(index) return df, df2, gdf, gdf2 @pytest.mark.parametrize("nulls", ["none", "some", "all"]) @pytest.mark.parametrize("index", [False, "z", "y"]) @pytest.mark.parametrize("axis", [0, "index"]) def test_concat_dataframe(index, nulls, axis): if index == "y" and nulls in ("some", "all"): pytest.skip("nulls in columns, dont index") df, df2, gdf, gdf2 = make_frames(index, nulls=nulls) # Make empty frame gdf_empty1 = gdf2[:0] assert len(gdf_empty1) == 0 df_empty1 = gdf_empty1.to_pandas() # DataFrame res = gd.concat([gdf, gdf2, gdf, gdf_empty1], axis=axis).to_pandas() sol = pd.concat([df, df2, df, df_empty1], axis=axis) assert_eq( res, sol, check_names=False, check_categorical=False, check_index_type=True, ) # Series for c in [i for i in ("x", "y", "z") if i != index]: res = gd.concat([gdf[c], gdf2[c], gdf[c]], axis=axis).to_pandas() sol = pd.concat([df[c], df2[c], df[c]], axis=axis) assert_eq( res, sol, check_names=False, check_categorical=False, check_index_type=True, ) # Index res = gd.concat([gdf.index, gdf2.index], axis=axis).to_pandas() sol = df.index.append(df2.index) assert_eq(res, sol, check_names=False, check_categorical=False) @pytest.mark.parametrize( "values", [["foo", "bar"], [1.0, 2.0], pd.Series(["one", "two"], dtype="category")], ) def test_concat_all_nulls(values): pa = pd.Series(values) pb = pd.Series([None]) ps = pd.concat([pa, pb]) ga = gd.Series(values) gb = gd.Series([None]) gs = gd.concat([ga, gb]) assert_eq( ps, gs, check_dtype=False, check_categorical=False, check_index_type=True, ) def test_concat_errors(): df, df2, gdf, gdf2 = make_frames() # No objs assert_exceptions_equal( lfunc=pd.concat, rfunc=gd.concat, lfunc_args_and_kwargs=([], {"objs": []}), rfunc_args_and_kwargs=([], {"objs": []}), ) # All None assert_exceptions_equal( lfunc=pd.concat, rfunc=gd.concat, lfunc_args_and_kwargs=([], {"objs": [None, None]}), rfunc_args_and_kwargs=([], {"objs": [None, None]}), ) # Mismatched types assert_exceptions_equal( lfunc=pd.concat, rfunc=gd.concat, lfunc_args_and_kwargs=([], {"objs": [df, df.index, df.x]}), rfunc_args_and_kwargs=([], {"objs": [gdf, gdf.index, gdf.x]}), ) # Unknown type assert_exceptions_equal( lfunc=pd.concat, rfunc=gd.concat, lfunc_args_and_kwargs=([], {"objs": ["bar", "foo"]}), rfunc_args_and_kwargs=([], {"objs": ["bar", "foo"]}), ) # Mismatched index dtypes gdf3 = gdf2.copy() del gdf3["z"] gdf4 = gdf2.set_index("z") with pytest.raises(ValueError, match="All 
columns must be the same type"): gd.concat([gdf3, gdf4]) # Bad axis value assert_exceptions_equal( lfunc=pd.concat, rfunc=gd.concat, lfunc_args_and_kwargs=( [], {"objs": [gdf.to_pandas(), gdf2.to_pandas()], "axis": "bad_value"}, ), rfunc_args_and_kwargs=([], {"objs": [gdf, gdf2], "axis": "bad_value"}), ) def test_concat_misordered_columns(): df, df2, gdf, gdf2 = make_frames(False) gdf2 = gdf2[["z", "x", "y"]] df2 = df2[["z", "x", "y"]] res = gd.concat([gdf, gdf2]).to_pandas() sol = pd.concat([df, df2], sort=False) assert_eq( res, sol, check_names=False, check_categorical=False, check_index_type=True, ) @pytest.mark.parametrize("axis", [1, "columns"]) def test_concat_columns(axis): pdf1 = pd.DataFrame(np.random.randint(10, size=(5, 3)), columns=[1, 2, 3]) pdf2 = pd.DataFrame( np.random.randint(10, size=(5, 4)), columns=[4, 5, 6, 7] ) gdf1 = gd.from_pandas(pdf1) gdf2 = gd.from_pandas(pdf2) expect = pd.concat([pdf1, pdf2], axis=axis) got = gd.concat([gdf1, gdf2], axis=axis) assert_eq(expect, got, check_index_type=True) def test_concat_multiindex_dataframe(): gdf = gd.DataFrame( { "w": np.arange(4), "x": np.arange(4), "y": np.arange(4), "z": np.arange(4), } ) gdg = gdf.groupby(["w", "x"]).min() pdg = gdg.to_pandas() pdg1 = pdg.iloc[:, :1] pdg2 = pdg.iloc[:, 1:] gdg1 = gd.from_pandas(pdg1) gdg2 = gd.from_pandas(pdg2) assert_eq( gd.concat([gdg1, gdg2]).astype("float64"), pd.concat([pdg1, pdg2]), check_index_type=True, ) assert_eq( gd.concat([gdg1, gdg2], axis=1), pd.concat([pdg1, pdg2], axis=1), check_index_type=True, ) def test_concat_multiindex_series(): gdf = gd.DataFrame( { "w": np.arange(4), "x": np.arange(4), "y": np.arange(4), "z": np.arange(4), } ) gdg = gdf.groupby(["w", "x"]).min() pdg = gdg.to_pandas() pdg1 = pdg["y"] pdg2 = pdg["z"] gdg1 = gd.from_pandas(pdg1) gdg2 = gd.from_pandas(pdg2) assert_eq( gd.concat([gdg1, gdg2]), pd.concat([pdg1, pdg2]), check_index_type=True ) assert_eq(gd.concat([gdg1, gdg2], axis=1), pd.concat([pdg1, pdg2], axis=1)) def test_concat_multiindex_dataframe_and_series(): gdf = gd.DataFrame( { "w": np.arange(4), "x": np.arange(4), "y": np.arange(4), "z": np.arange(4), } ) gdg = gdf.groupby(["w", "x"]).min() pdg = gdg.to_pandas() pdg1 = pdg[["y", "z"]] pdg2 = pdg["z"] pdg2.name = "a" gdg1 = gd.from_pandas(pdg1) gdg2 = gd.from_pandas(pdg2) assert_eq( gd.concat([gdg1, gdg2], axis=1), pd.concat([pdg1, pdg2], axis=1), check_index_type=True, ) def test_concat_multiindex_series_and_dataframe(): gdf = gd.DataFrame( { "w": np.arange(4), "x": np.arange(4), "y": np.arange(4), "z": np.arange(4), } ) gdg = gdf.groupby(["w", "x"]).min() pdg = gdg.to_pandas() pdg1 = pdg["z"] pdg2 = pdg[["y", "z"]] pdg1.name = "a" gdg1 = gd.from_pandas(pdg1) gdg2 = gd.from_pandas(pdg2) assert_eq( gd.concat([gdg1, gdg2], axis=1), pd.concat([pdg1, pdg2], axis=1), check_index_type=True, ) @pytest.mark.parametrize("myindex", ["a", "b"]) def test_concat_string_index_name(myindex): # GH-Issue #3420 data = {"a": [123, 456], "b": ["s1", "s2"]} df1 = gd.DataFrame(data).set_index(myindex) df2 = df1.copy() df3 = gd.concat([df1, df2]) assert df3.index.name == myindex def test_pandas_concat_compatibility_axis1(): d1 = gd.datasets.randomdata( 3, dtypes={"a": float, "ind": float} ).set_index("ind") d2 = gd.datasets.randomdata( 3, dtypes={"b": float, "ind": float} ).set_index("ind") d3 = gd.datasets.randomdata( 3, dtypes={"c": float, "ind": float} ).set_index("ind") d4 = gd.datasets.randomdata( 3, dtypes={"d": float, "ind": float} ).set_index("ind") d5 = gd.datasets.randomdata( 3, dtypes={"e": float, "ind": 
float} ).set_index("ind") pd1 = d1.to_pandas() pd2 = d2.to_pandas() pd3 = d3.to_pandas() pd4 = d4.to_pandas() pd5 = d5.to_pandas() expect = pd.concat([pd1, pd2, pd3, pd4, pd5], axis=1) got = gd.concat([d1, d2, d3, d4, d5], axis=1) assert_eq( got.sort_index(), expect.sort_index(), check_index_type=True, ) @pytest.mark.parametrize("index", [[0, 1, 2], [2, 1, 0], [5, 9, 10]]) @pytest.mark.parametrize("names", [False, (0, 1)]) @pytest.mark.parametrize( "data", [ (["a", "b", "c"], ["a", "b", "c"]), (["a", "b", "c"], ["XX", "YY", "ZZ"]), ], ) def test_pandas_concat_compatibility_axis1_overlap(index, names, data): s1 = gd.Series(data[0], index=[0, 1, 2]) s2 = gd.Series(data[1], index=index) if names: s1.name = names[0] s2.name = names[1] ps1 = s1.to_pandas() ps2 = s2.to_pandas() got = gd.concat([s1, s2], axis=1) expect = pd.concat([ps1, ps2], axis=1) assert_eq(got, expect, check_index_type=True) def test_pandas_concat_compatibility_axis1_eq_index(): s1 = gd.Series(["a", "b", "c"], index=[0, 1, 2]) s2 = gd.Series(["a", "b", "c"], index=[1, 1, 1]) ps1 = s1.to_pandas() ps2 = s2.to_pandas() with pytest.warns(FutureWarning): assert_exceptions_equal( lfunc=pd.concat, rfunc=gd.concat, lfunc_args_and_kwargs=([], {"objs": [ps1, ps2], "axis": 1}), rfunc_args_and_kwargs=([], {"objs": [s1, s2], "axis": 1}), ) @pytest.mark.parametrize("name", [None, "a"]) def test_pandas_concat_compatibility_axis1_single_column(name): # Pandas renames series name `None` to 0 # and preserves anything else s = gd.Series([1, 2, 3], name=name) got = gd.concat([s], axis=1) expected = pd.concat([s.to_pandas()], axis=1) assert_eq(expected, got) def test_concat_duplicate_columns(): cdf = gd.DataFrame( { "id4": 4 * list(range(6)), "id5": 4 * list(reversed(range(6))), "v3": 6 * list(range(4)), } ) cdf_std = cdf.groupby(["id4", "id5"])[["v3"]].std() cdf_med = cdf.groupby(["id4", "id5"])[["v3"]].quantile(q=0.5) with pytest.raises(NotImplementedError): gd.concat([cdf_med, cdf_std], axis=1) def test_concat_mixed_input(): pdf1 = pd.DataFrame({"a": [10, 20, 30]}) pdf2 = pd.DataFrame({"a": [11, 22, 33]}) gdf1 = gd.from_pandas(pdf1) gdf2 = gd.from_pandas(pdf2) assert_eq( pd.concat([pdf1, None, pdf2, None]), gd.concat([gdf1, None, gdf2, None]), check_index_type=True, ) assert_eq( pd.concat([pdf1, None]), gd.concat([gdf1, None]), check_index_type=True ) assert_eq( pd.concat([None, pdf2]), gd.concat([None, gdf2]), check_index_type=True ) assert_eq( pd.concat([None, pdf2, pdf1]), gd.concat([None, gdf2, gdf1]), check_index_type=True, ) @pytest.mark.parametrize( "objs", [ [pd.Series([1, 2, 3]), pd.DataFrame({"a": [1, 2]})], [pd.Series([1, 2, 3]), pd.DataFrame({"a": []})], [pd.Series([], dtype="float64"), pd.DataFrame({"a": []})], [pd.Series([], dtype="float64"), pd.DataFrame({"a": [1, 2]})], [pd.Series([1, 2, 3.0, 1.2], name="abc"), pd.DataFrame({"a": [1, 2]})], [ pd.Series( [1, 2, 3.0, 1.2], name="abc", index=[100, 110, 120, 130] ), pd.DataFrame({"a": [1, 2]}), ], [ pd.Series( [1, 2, 3.0, 1.2], name="abc", index=["a", "b", "c", "d"] ), pd.DataFrame({"a": [1, 2]}, index=["a", "b"]), ], [ pd.Series( [1, 2, 3.0, 1.2, 8, 100], name="New name", index=["a", "b", "c", "d", "e", "f"], ), pd.DataFrame( {"a": [1, 2, 4, 10, 11, 12]}, index=["a", "b", "c", "d", "e", "f"], ), ], [ pd.Series( [1, 2, 3.0, 1.2, 8, 100], name="New name", index=["a", "b", "c", "d", "e", "f"], ), pd.DataFrame( {"a": [1, 2, 4, 10, 11, 12]}, index=["a", "b", "c", "d", "e", "f"], ), ] * 7, ], ) def test_concat_series_dataframe_input(objs): pd_objs = objs gd_objs = [gd.from_pandas(obj) 
for obj in objs] expected = pd.concat(pd_objs) actual = gd.concat(gd_objs) assert_eq( expected.fillna(-1), actual.fillna(-1), check_dtype=False, check_index_type=False, ) @pytest.mark.parametrize( "objs", [ [ pd.Series(["a", "b", "c", "d"]), pd.Series(["1", "2", "3", "4"]), pd.DataFrame({"first col": ["10", "11", "12", "13"]}), ], [ pd.Series(["a", "b", "c", "d"]), pd.Series(["1", "2", "3", "4"]), pd.DataFrame( { "first col": ["10", "11", "12", "13"], "second col": ["a", "b", "c", "d"], } ), ], [ pd.Series(["a", "b", "c"]), pd.Series(["1", "2", "3", "4"]), pd.DataFrame( { "first col": ["10", "11", "12", "13"], "second col": ["a", "b", "c", "d"], } ), ], ], ) def test_concat_series_dataframe_input_str(objs): pd_objs = objs gd_objs = [gd.from_pandas(obj) for obj in objs] expected = pd.concat(pd_objs) actual = gd.concat(gd_objs) assert_eq(expected, actual, check_dtype=False, check_index_type=False) @pytest.mark.parametrize( "df", [ pd.DataFrame(), pd.DataFrame(index=[10, 20, 30]), pd.DataFrame( {"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20] ), pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")), pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}), pd.DataFrame({"l": [10]}), pd.DataFrame({"l": [10]}, index=[200]), pd.DataFrame([], index=[100]), pd.DataFrame({"cat": pd.Series(["one", "two"], dtype="category")}), ], ) @pytest.mark.parametrize( "other", [ [pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()], [ pd.DataFrame( {"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20] ), pd.DataFrame(), pd.DataFrame(), pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")), ], [ pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}), pd.DataFrame({"l": [10]}), pd.DataFrame({"l": [10]}, index=[200]), pd.DataFrame( {"cat": pd.Series(["two", "three"], dtype="category")} ), ], [ pd.DataFrame([]), pd.DataFrame([], index=[100]), pd.DataFrame( {"cat": pd.Series(["two", "three"], dtype="category")} ), ], ], ) @pytest.mark.parametrize("ignore_index", [True, False]) def test_concat_empty_dataframes(df, other, ignore_index): other_pd = [df] + other gdf = gd.from_pandas(df) other_gd = [gdf] + [gd.from_pandas(o) for o in other] expected = pd.concat(other_pd, ignore_index=ignore_index) actual = gd.concat(other_gd, ignore_index=ignore_index) if expected.shape != df.shape: for key, col in actual[actual.columns].items(): if is_categorical_dtype(col.dtype): if not is_categorical_dtype(expected[key].dtype): # TODO: Pandas bug: # https://github.com/pandas-dev/pandas/issues/42840 expected[key] = expected[key].fillna("-1").astype("str") else: expected[key] = ( expected[key] .cat.add_categories(["-1"]) .fillna("-1") .astype("str") ) actual[key] = col.astype("str").fillna("-1") else: expected[key] = expected[key].fillna(-1) actual[key] = col.fillna(-1) assert_eq(expected, actual, check_dtype=False, check_index_type=True) else: assert_eq(expected, actual, check_index_type=not gdf.empty) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("axis", [0, "index"]) @pytest.mark.parametrize( "data", [ (["a", "b", "c"], ["a", "b", "c"]), (["a", "b", "c"], ["XX", "YY", "ZZ"]), ], ) def test_concat_empty_and_nonempty_series(ignore_index, data, axis): s1 = gd.Series() s2 = gd.Series(data[0]) ps1 = s1.to_pandas() ps2 = s2.to_pandas() got = gd.concat([s1, s2], axis=axis, ignore_index=ignore_index) expect = pd.concat([ps1, ps2], axis=axis, ignore_index=ignore_index) assert_eq(got, expect, check_index_type=True) @pytest.mark.parametrize("ignore_index", [True, False]) 
@pytest.mark.parametrize("axis", [0, "index"]) def test_concat_two_empty_series(ignore_index, axis): s1 = gd.Series() s2 = gd.Series() ps1 = s1.to_pandas() ps2 = s2.to_pandas() got = gd.concat([s1, s2], axis=axis, ignore_index=ignore_index) expect = pd.concat([ps1, ps2], axis=axis, ignore_index=ignore_index) assert_eq(got, expect, check_index_type=True) @pytest.mark.parametrize( "df1,df2", [ ( gd.DataFrame({"k1": [0, 1], "k2": [2, 3], "v1": [4, 5]}), gd.DataFrame({"k1": [1, 0], "k2": [3, 2], "v2": [6, 7]}), ), ( gd.DataFrame({"k1": [0, 1], "k2": [2, 3], "v1": [4, 5]}), gd.DataFrame({"k1": [0, 1], "k2": [3, 2], "v2": [6, 7]}), ), ], ) def test_concat_dataframe_with_multiindex(df1, df2): gdf1 = df1 gdf1 = gdf1.set_index(["k1", "k2"]) gdf2 = df2 gdf2 = gdf2.set_index(["k1", "k2"]) pdf1 = gdf1.to_pandas() pdf2 = gdf2.to_pandas() actual = gd.concat([gdf1, gdf2], axis=1) expected = pd.concat([pdf1, pdf2], axis=1) # Will need to sort_index before comparing as # ordering is not deterministic in case of pandas # multiIndex with concat. assert_eq( expected.sort_index(), actual.sort_index(), check_index_type=True, ) @pytest.mark.parametrize( "objs", [ [ pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], } ), pd.DataFrame( {"x": range(10, 20), "y": list(map(float, range(10, 20)))} ), ], [ pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], }, index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], ), pd.DataFrame( {"x": range(10, 20), "y": list(map(float, range(10, 20)))}, index=["k", "l", "m", "n", "o", "p", "q", "r", "s", "t"], ), pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], }, index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], ), pd.DataFrame( {"x": range(10, 20), "y": list(map(float, range(10, 20)))}, index=["a", "b", "c", "d", "z", "f", "g", "h", "i", "w"], ), ], ], ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [0]) def test_concat_join(objs, ignore_index, sort, join, axis): gpu_objs = [gd.from_pandas(o) for o in objs] assert_eq( pd.concat( objs, sort=sort, join=join, ignore_index=ignore_index, axis=axis ), gd.concat( gpu_objs, sort=sort, join=join, ignore_index=ignore_index, axis=axis, ), check_index_type=True, ) @pytest.mark.parametrize( "objs", [ [ pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], } ), pd.DataFrame( {"x": range(10, 20), "y": list(map(float, range(10, 20)))} ), ], ], ) def test_concat_join_axis_1_dup_error(objs): gpu_objs = [gd.from_pandas(o) for o in objs] # we do not support duplicate columns with pytest.raises(NotImplementedError): assert_eq( pd.concat( objs, axis=1, ), gd.concat( gpu_objs, axis=1, ), ) @pytest.mark.parametrize( "objs", [ [ pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], } ), pd.DataFrame( {"l": range(10, 20), "m": list(map(float, range(10, 20)))} ), ], ], ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [1]) def test_concat_join_axis_1(objs, ignore_index, sort, join, axis): # no duplicate columns gpu_objs = [gd.from_pandas(o) for o in objs] expected = pd.concat( objs, sort=sort, join=join, 
ignore_index=ignore_index, axis=axis ) actual = gd.concat( gpu_objs, sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) if PANDAS_GE_150: assert_eq(expected, actual, check_index_type=True) else: # special handling of check_index_type below # required because: # https://github.com/pandas-dev/pandas/issues/47501 assert_eq(expected, actual, check_index_type=not (axis == 1 and sort)) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [1, 0]) def test_concat_join_many_df_and_empty_df(ignore_index, sort, join, axis): # no duplicate columns pdf1 = pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], } ) pdf2 = pd.DataFrame( {"l": range(10, 20), "m": list(map(float, range(10, 20)))} ) pdf3 = pd.DataFrame({"j": [1, 2], "k": [1, 2], "s": [1, 2], "t": [1, 2]}) pdf_empty1 = pd.DataFrame() gdf1 = gd.from_pandas(pdf1) gdf2 = gd.from_pandas(pdf2) gdf3 = gd.from_pandas(pdf3) gdf_empty1 = gd.from_pandas(pdf_empty1) assert_eq( pd.concat( [pdf1, pdf2, pdf3, pdf_empty1], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ), gd.concat( [gdf1, gdf2, gdf3, gdf_empty1], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ), check_index_type=False, ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [0, 1]) def test_concat_join_one_df(ignore_index, sort, join, axis): pdf1 = pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], } ) gdf1 = gd.from_pandas(pdf1) expected = pd.concat( [pdf1], sort=sort, join=join, ignore_index=ignore_index, axis=axis ) actual = gd.concat( [gdf1], sort=sort, join=join, ignore_index=ignore_index, axis=axis ) if PANDAS_GE_150: assert_eq(expected, actual, check_index_type=True) else: # special handling of check_index_type below # required because: # https://github.com/pandas-dev/pandas/issues/47501 assert_eq(expected, actual, check_index_type=not (axis == 1 and sort)) @pytest.mark.parametrize( "pdf1,pdf2", [ ( pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}), pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}), ), ( pd.DataFrame( {"a": [1, 2, 3], "b": [4, 5, 6]}, index=["p", "q", "r"] ), pd.DataFrame( {"c": [7, 8, 9], "d": [10, 11, 12]}, index=["r", "p", "z"] ), ), ], ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [0, 1]) @pytest.mark.xfail( condition=PANDAS_LT_140, reason="https://github.com/pandas-dev/pandas/issues/43584", ) def test_concat_join_no_overlapping_columns( pdf1, pdf2, ignore_index, sort, join, axis ): gdf1 = gd.from_pandas(pdf1) gdf2 = gd.from_pandas(pdf2) expected = pd.concat( [pdf1, pdf2], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) actual = gd.concat( [gdf1, gdf2], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) if PANDAS_GE_150: assert_eq(expected, actual, check_index_type=True) else: # special handling of check_index_type below # required because: # https://github.com/pandas-dev/pandas/issues/47501 assert_eq(expected, actual, check_index_type=not (axis == 1 and sort)) @pytest.mark.parametrize("ignore_index", [False, True]) @pytest.mark.parametrize("sort", [True, False]) 
@pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [0, 1]) def test_concat_join_no_overlapping_columns_many_and_empty( ignore_index, sort, join, axis ): pdf4 = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) pdf5 = pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}) pdf6 = pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], } ) pdf_empty = pd.DataFrame() gdf4 = gd.from_pandas(pdf4) gdf5 = gd.from_pandas(pdf5) gdf6 = gd.from_pandas(pdf6) gdf_empty = gd.from_pandas(pdf_empty) expected = pd.concat( [pdf4, pdf5, pdf6, pdf_empty], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) actual = gd.concat( [gdf4, gdf5, gdf6, gdf_empty], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) assert_eq( expected, actual, check_index_type=False, ) @pytest.mark.parametrize( "objs", [ [ pd.DataFrame( {"a": [1, 2, 3], "b": [4, 5, 6]}, index=["z", "t", "k"] ), pd.DataFrame( {"c": [7, 8, 9], "d": [10, 11, 12]}, index=["z", "t", "k"] ), pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], }, index=["z", "t", "k", "a", "b", "c", "d", "e", "f", "g"], ), pd.DataFrame(index=pd.Index([], dtype="str")), ], [ pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}), pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}), pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], } ), pd.DataFrame(index=pd.Index([], dtype="str")), ], pytest.param( [ pd.DataFrame( {"a": [1, 2, 3], "nb": [10, 11, 12]}, index=["Q", "W", "R"] ), None, ], ), ], ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [False, True]) @pytest.mark.parametrize("join", ["outer", "inner"]) @pytest.mark.parametrize("axis", [0, 1]) def test_concat_join_no_overlapping_columns_many_and_empty2( objs, ignore_index, sort, join, axis ): objs_gd = [gd.from_pandas(o) if o is not None else o for o in objs] expected = pd.concat( objs, sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) actual = gd.concat( objs_gd, sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) assert_eq(expected, actual, check_index_type=False) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [0, 1]) def test_concat_join_no_overlapping_columns_empty_df_basic( ignore_index, sort, join, axis ): pdf6 = pd.DataFrame( { "x": range(10), "y": list(map(float, range(10))), "z": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], } ) pdf_empty = pd.DataFrame() gdf6 = gd.from_pandas(pdf6) gdf_empty = gd.from_pandas(pdf_empty) expected = pd.concat( [pdf6, pdf_empty], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) actual = gd.concat( [gdf6, gdf_empty], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) # TODO: change `check_index_type` to `True` # after following bug from pandas is fixed: # https://github.com/pandas-dev/pandas/issues/46675 assert_eq(expected, actual, check_index_type=False) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [0, 1]) def test_concat_join_series(ignore_index, sort, join, axis): s1 = gd.Series(["a", "b", "c"]) s2 = gd.Series(["a", "b"]) s3 = gd.Series(["a", "b", "c", "d"]) s4 = gd.Series() ps1 = s1.to_pandas() ps2 = s2.to_pandas() ps3 = s3.to_pandas() 
ps4 = s4.to_pandas() expected = pd.concat( [ps1, ps2, ps3, ps4], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) actual = gd.concat( [s1, s2, s3, s4], sort=sort, join=join, ignore_index=ignore_index, axis=axis, ) if PANDAS_GE_150: assert_eq( expected, actual, check_index_type=True, ) else: # special handling of check_index_type required below: # https://github.com/pandas-dev/pandas/issues/46675 # https://github.com/pandas-dev/pandas/issues/47501 assert_eq( expected, actual, check_index_type=(axis == 0), ) @pytest.mark.parametrize( "df", [ pd.DataFrame(), pd.DataFrame(index=[10, 20, 30]), pd.DataFrame( {"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20] ), pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")), pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}), pd.DataFrame({"l": [10]}), pd.DataFrame({"l": [10]}, index=[200]), pd.DataFrame([], index=[100]), pd.DataFrame({"cat": pd.Series(["one", "two"], dtype="category")}), ], ) @pytest.mark.parametrize( "other", [ [pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()], [ pd.DataFrame( {"b": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20] ), pd.DataFrame(), pd.DataFrame(), pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")), ], [ pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}), pd.DataFrame({"l": [10]}), pd.DataFrame({"k": [10]}, index=[200]), pd.DataFrame( {"cat": pd.Series(["two", "three"], dtype="category")} ), ], [ pd.DataFrame([]), pd.DataFrame([], index=[100]), pd.DataFrame( {"cat": pd.Series(["two", "three"], dtype="category")} ), ], ], ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("join", ["inner", "outer"]) @pytest.mark.parametrize("axis", [0]) def test_concat_join_empty_dataframes( df, other, ignore_index, axis, join, sort ): other_pd = [df] + other gdf = gd.from_pandas(df) other_gd = [gdf] + [gd.from_pandas(o) for o in other] expected = pd.concat( other_pd, ignore_index=ignore_index, axis=axis, join=join, sort=sort ) actual = gd.concat( other_gd, ignore_index=ignore_index, axis=axis, join=join, sort=sort ) if expected.shape != df.shape: if axis == 0: for key, col in actual[actual.columns].items(): if is_categorical_dtype(col.dtype): if not is_categorical_dtype(expected[key].dtype): # TODO: Pandas bug: # https://github.com/pandas-dev/pandas/issues/42840 expected[key] = ( expected[key].fillna("-1").astype("str") ) else: expected[key] = ( expected[key] .cat.add_categories(["-1"]) .fillna("-1") .astype("str") ) actual[key] = col.astype("str").fillna("-1") else: expected[key] = expected[key].fillna(-1) actual[key] = col.fillna(-1) assert_eq( expected.fillna(-1), actual.fillna(-1), check_dtype=False, check_index_type=False if len(expected) == 0 or actual.empty else True, check_column_type=False, ) else: # no need to fill in if axis=1 assert_eq( expected, actual, check_index_type=False, check_column_type=False, ) assert_eq( expected, actual, check_dtype=False, check_index_type=False, check_column_type=False, ) @pytest.mark.parametrize( "df", [ pd.DataFrame(), pd.DataFrame(index=[10, 20, 30]), pd.DataFrame( {"c": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20] ), pd.DataFrame([[5, 6], [7, 8]], columns=list("AB")), pd.DataFrame({"f": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}), pd.DataFrame({"l": [10]}), pd.DataFrame({"m": [10]}, index=[200]), pd.DataFrame([], index=[100]), pd.DataFrame({"cat": pd.Series(["one", "two"], dtype="category")}), ], ) @pytest.mark.parametrize( "other", [ 
[pd.DataFrame(), pd.DataFrame(), pd.DataFrame(), pd.DataFrame()], [ pd.DataFrame( {"b": [10, 11, 22, 33, 44, 100]}, index=[7, 8, 9, 10, 11, 20] ), pd.DataFrame(), pd.DataFrame(), pd.DataFrame([[5, 6], [7, 8]], columns=list("CD")), ], [ pd.DataFrame({"g": [10.2, 11.2332, 0.22, 3.3, 44.23, 10.0]}), pd.DataFrame({"h": [10]}), pd.DataFrame({"k": [10]}, index=[200]), pd.DataFrame( {"dog": pd.Series(["two", "three"], dtype="category")} ), ], [ pd.DataFrame([]), pd.DataFrame([], index=[100]), pd.DataFrame( {"bird": pd.Series(["two", "three"], dtype="category")} ), ], ], ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize( "join", [ "inner", pytest.param( "outer", marks=pytest.mark.xfail( condition=not PANDAS_GE_150, reason="https://github.com/pandas-dev/pandas/issues/37937", ), ), ], ) @pytest.mark.parametrize("axis", [1]) def test_concat_join_empty_dataframes_axis_1( df, other, ignore_index, axis, join, sort ): # no duplicate columns other_pd = [df] + other gdf = gd.from_pandas(df) other_gd = [gdf] + [gd.from_pandas(o) for o in other] expected = pd.concat( other_pd, ignore_index=ignore_index, axis=axis, join=join, sort=sort ) actual = gd.concat( other_gd, ignore_index=ignore_index, axis=axis, join=join, sort=sort ) if expected.shape != df.shape: if axis == 0: for key, col in actual[actual.columns].items(): if is_categorical_dtype(col.dtype): expected[key] = expected[key].fillna("-1") actual[key] = col.astype("str").fillna("-1") # if not expected.empty: assert_eq( expected.fillna(-1), actual.fillna(-1), check_dtype=False, check_index_type=False if len(expected) == 0 or actual.empty else True, check_column_type=False, ) else: # no need to fill in if axis=1 assert_eq( expected, actual, check_index_type=False, check_column_type=False, ) assert_eq( expected, actual, check_index_type=False, check_column_type=False ) def test_concat_preserve_order(): """Ensure that order is preserved on 'inner' concatenations.""" df = pd.DataFrame([["d", 3, 4.0], ["c", 4, 5.0]], columns=["c", "b", "a"]) dfs = [df, df] assert_eq( pd.concat(dfs, join="inner"), gd.concat([gd.DataFrame(df) for df in dfs], join="inner"), check_index_type=True, ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("typ", [gd.DataFrame, gd.Series]) def test_concat_single_object(ignore_index, typ): """Ensure that concat on a single object does not change it.""" obj = typ([1, 2, 3]) assert_eq( gd.concat([obj], ignore_index=ignore_index, axis=0), obj, check_index_type=True, ) @pytest.mark.parametrize( "ltype", [Decimal64Dtype(3, 1), Decimal64Dtype(7, 2), Decimal64Dtype(8, 4)], ) @pytest.mark.parametrize( "rtype", [ Decimal64Dtype(3, 2), Decimal64Dtype(8, 4), gd.Decimal128Dtype(3, 2), gd.Decimal32Dtype(8, 4), ], ) def test_concat_decimal_dataframe(ltype, rtype): gdf1 = gd.DataFrame( {"id": np.random.randint(0, 10, 3), "val": ["22.3", "59.5", "81.1"]} ) gdf2 = gd.DataFrame( {"id": np.random.randint(0, 10, 3), "val": ["2.35", "5.59", "8.14"]} ) gdf1["val"] = gdf1["val"].astype(ltype) gdf2["val"] = gdf2["val"].astype(rtype) pdf1 = gdf1.to_pandas() pdf2 = gdf2.to_pandas() got = gd.concat([gdf1, gdf2]) expected = pd.concat([pdf1, pdf2]) assert_eq(expected, got, check_index_type=True) @pytest.mark.parametrize("ltype", [Decimal64Dtype(4, 1), Decimal64Dtype(8, 2)]) @pytest.mark.parametrize( "rtype", [ Decimal64Dtype(4, 3), Decimal64Dtype(10, 4), Decimal32Dtype(8, 3), Decimal128Dtype(18, 3), ], ) def test_concat_decimal_series(ltype, rtype): gs1 = 
gd.Series(["228.3", "559.5", "281.1"]).astype(ltype) gs2 = gd.Series(["2.345", "5.259", "8.154"]).astype(rtype) ps1 = gs1.to_pandas() ps2 = gs2.to_pandas() got = gd.concat([gs1, gs2]) expected = pd.concat([ps1, ps2]) assert_eq(expected, got, check_index_type=True) @pytest.mark.parametrize( "df1, df2, df3, expected", [ ( gd.DataFrame( {"val": [Decimal("42.5"), Decimal("8.7")]}, dtype=Decimal64Dtype(5, 2), ), gd.DataFrame( {"val": [Decimal("9.23"), Decimal("-67.49")]}, dtype=Decimal64Dtype(6, 4), ), gd.DataFrame({"val": [8, -5]}, dtype="int32"), gd.DataFrame( { "val": [ Decimal("42.5"), Decimal("8.7"), Decimal("9.23"), Decimal("-67.49"), Decimal("8"), Decimal("-5"), ] }, dtype=Decimal32Dtype(7, 4), index=[0, 1, 0, 1, 0, 1], ), ), ( gd.DataFrame( {"val": [Decimal("95.2"), Decimal("23.4")]}, dtype=Decimal64Dtype(5, 2), ), gd.DataFrame({"val": [54, 509]}, dtype="uint16"), gd.DataFrame({"val": [24, -48]}, dtype="int32"), gd.DataFrame( { "val": [ Decimal("95.2"), Decimal("23.4"), Decimal("54"), Decimal("509"), Decimal("24"), Decimal("-48"), ] }, dtype=Decimal32Dtype(5, 2), index=[0, 1, 0, 1, 0, 1], ), ), ( gd.DataFrame( {"val": [Decimal("36.56"), Decimal("-59.24")]}, dtype=Decimal64Dtype(9, 4), ), gd.DataFrame({"val": [403.21, 45.13]}, dtype="float32"), gd.DataFrame({"val": [52.262, -49.25]}, dtype="float64"), gd.DataFrame( { "val": [ Decimal("36.56"), Decimal("-59.24"), Decimal("403.21"), Decimal("45.13"), Decimal("52.262"), Decimal("-49.25"), ] }, dtype=Decimal32Dtype(9, 4), index=[0, 1, 0, 1, 0, 1], ), ), ( gd.DataFrame( {"val": [Decimal("9563.24"), Decimal("236.633")]}, dtype=Decimal64Dtype(9, 4), ), gd.DataFrame({"val": [5393, -95832]}, dtype="int64"), gd.DataFrame({"val": [-29.234, -31.945]}, dtype="float64"), gd.DataFrame( { "val": [ Decimal("9563.24"), Decimal("236.633"), Decimal("5393"), Decimal("-95832"), Decimal("-29.234"), Decimal("-31.945"), ] }, dtype=Decimal32Dtype(9, 4), index=[0, 1, 0, 1, 0, 1], ), ), ( gd.DataFrame( {"val": [Decimal("95633.24"), Decimal("236.633")]}, dtype=Decimal128Dtype(19, 4), ), gd.DataFrame({"val": [5393, -95832]}, dtype="int64"), gd.DataFrame({"val": [-29.234, -31.945]}, dtype="float64"), gd.DataFrame( { "val": [ Decimal("95633.24"), Decimal("236.633"), Decimal("5393"), Decimal("-95832"), Decimal("-29.234"), Decimal("-31.945"), ] }, dtype=Decimal128Dtype(19, 4), index=[0, 1, 0, 1, 0, 1], ), ), ], ) def test_concat_decimal_numeric_dataframe(df1, df2, df3, expected): df = gd.concat([df1, df2, df3]) assert_eq(df, expected, check_index_type=True) assert_eq(df.val.dtype, expected.val.dtype) @pytest.mark.parametrize( "s1, s2, s3, expected", [ ( gd.Series( [Decimal("32.8"), Decimal("-87.7")], dtype=Decimal64Dtype(6, 2) ), gd.Series( [Decimal("101.243"), Decimal("-92.449")], dtype=Decimal64Dtype(9, 6), ), gd.Series([94, -22], dtype="int32"), gd.Series( [ Decimal("32.8"), Decimal("-87.7"), Decimal("101.243"), Decimal("-92.449"), Decimal("94"), Decimal("-22"), ], dtype=Decimal64Dtype(10, 6), index=[0, 1, 0, 1, 0, 1], ), ), ( gd.Series( [Decimal("7.2"), Decimal("122.1")], dtype=Decimal64Dtype(5, 2) ), gd.Series([33, 984], dtype="uint32"), gd.Series([593, -702], dtype="int32"), gd.Series( [ Decimal("7.2"), Decimal("122.1"), Decimal("33"), Decimal("984"), Decimal("593"), Decimal("-702"), ], dtype=Decimal32Dtype(5, 2), index=[0, 1, 0, 1, 0, 1], ), ), ( gd.Series( [Decimal("982.94"), Decimal("-493.626")], dtype=Decimal64Dtype(9, 4), ), gd.Series([847.98, 254.442], dtype="float32"), gd.Series([5299.262, -2049.25], dtype="float64"), gd.Series( [ Decimal("982.94"), 
Decimal("-493.626"), Decimal("847.98"), Decimal("254.442"), Decimal("5299.262"), Decimal("-2049.25"), ], dtype=Decimal32Dtype(9, 4), index=[0, 1, 0, 1, 0, 1], ), ), ( gd.Series( [Decimal("492.204"), Decimal("-72824.455")], dtype=Decimal64Dtype(9, 4), ), gd.Series([8438, -27462], dtype="int64"), gd.Series([-40.292, 49202.953], dtype="float64"), gd.Series( [ Decimal("492.204"), Decimal("-72824.455"), Decimal("8438"), Decimal("-27462"), Decimal("-40.292"), Decimal("49202.953"), ], dtype=Decimal32Dtype(9, 4), index=[0, 1, 0, 1, 0, 1], ), ), ( gd.Series( [Decimal("492.204"), Decimal("-72824.455")], dtype=Decimal64Dtype(10, 4), ), gd.Series( [Decimal("8438"), Decimal("-27462")], dtype=Decimal32Dtype(9, 4), ), gd.Series( [Decimal("-40.292"), Decimal("49202.953")], dtype=Decimal128Dtype(19, 4), ), gd.Series( [ Decimal("492.204"), Decimal("-72824.455"), Decimal("8438"), Decimal("-27462"), Decimal("-40.292"), Decimal("49202.953"), ], dtype=Decimal128Dtype(19, 4), index=[0, 1, 0, 1, 0, 1], ), ), ], ) def test_concat_decimal_numeric_series(s1, s2, s3, expected): s = gd.concat([s1, s2, s3]) assert_eq(s, expected, check_index_type=True) @pytest.mark.parametrize( "s1, s2, expected", [ ( gd.Series( [Decimal("955.22"), Decimal("8.2")], dtype=Decimal64Dtype(5, 2) ), gd.Series(["2007-06-12", "2006-03-14"], dtype="datetime64[s]"), gd.Series( [ "955.22", "8.20", "2007-06-12 00:00:00", "2006-03-14 00:00:00", ], index=[0, 1, 0, 1], ), ), ( gd.Series( [Decimal("-52.44"), Decimal("365.22")], dtype=Decimal64Dtype(5, 2), ), gd.Series( np.arange( "2005-02-01T12", "2005-02-01T15", dtype="datetime64[h]" ), dtype="datetime64[s]", ), gd.Series( [ "-52.44", "365.22", "2005-02-01 12:00:00", "2005-02-01 13:00:00", "2005-02-01 14:00:00", ], index=[0, 1, 0, 1, 2], ), ), ( gd.Series( [Decimal("753.0"), Decimal("94.22")], dtype=Decimal64Dtype(5, 2), ), gd.Series([np.timedelta64(111, "s"), np.timedelta64(509, "s")]), gd.Series( ["753.00", "94.22", "0 days 00:01:51", "0 days 00:08:29"], index=[0, 1, 0, 1], ), ), ( gd.Series( [Decimal("753.0"), Decimal("94.22")], dtype=Decimal64Dtype(5, 2), ), gd.Series( [np.timedelta64(940252, "s"), np.timedelta64(758385, "s")] ), gd.Series( ["753.00", "94.22", "10 days 21:10:52", "8 days 18:39:45"], index=[0, 1, 0, 1], ), ), ], ) def test_concat_decimal_non_numeric(s1, s2, expected): s = gd.concat([s1, s2]) assert_eq(s, expected, check_index_type=True) @pytest.mark.parametrize( "s1, s2, expected", [ ( gd.Series([{"a": 5}, {"c": "hello"}, {"b": 7}]), gd.Series([{"a": 5, "c": "hello", "b": 7}]), gd.Series( [ {"a": 5, "b": None, "c": None}, {"a": None, "b": None, "c": "hello"}, {"a": None, "b": 7, "c": None}, {"a": 5, "b": 7, "c": "hello"}, ], index=[0, 1, 2, 0], ), ) ], ) def test_concat_struct_column(s1, s2, expected): s = gd.concat([s1, s2]) assert_eq(s, expected, check_index_type=True) @pytest.mark.parametrize( "frame1, frame2, expected", [ ( gd.Series([[{"b": 0}], [{"b": 1}], [{"b": 3}]]), gd.Series([[{"b": 10}], [{"b": 12}], None]), gd.Series( [ [{"b": 0}], [{"b": 1}], [{"b": 3}], [{"b": 10}], [{"b": 12}], None, ], index=[0, 1, 2, 0, 1, 2], ), ), ( gd.DataFrame({"a": [[{"b": 0}], [{"b": 1}], [{"b": 3}]]}), gd.DataFrame({"a": [[{"b": 10}], [{"b": 12}], None]}), gd.DataFrame( { "a": [ [{"b": 0}], [{"b": 1}], [{"b": 3}], [{"b": 10}], [{"b": 12}], None, ] }, index=[0, 1, 2, 0, 1, 2], ), ), ], ) def test_concat_list_column(frame1, frame2, expected): actual = gd.concat([frame1, frame2]) assert_eq(actual, expected, check_index_type=True) def test_concat_categorical_ordering(): # 
https://github.com/rapidsai/cudf/issues/11486 sr = pd.Series( ["a", "b", "c", "d", "e", "a", "b", "c", "d", "e"], dtype="category" ) sr = sr.cat.set_categories(["d", "a", "b", "c", "e"]) df = pd.DataFrame({"a": sr}) gdf = gd.from_pandas(df) expect = pd.concat([df, df, df]) got = gd.concat([gdf, gdf, gdf]) assert_eq(expect, got) @pytest.fixture(params=["rangeindex", "index"]) def singleton_concat_index(request): if request.param == "rangeindex": return pd.RangeIndex(0, 4) else: return pd.Index(["a", "h", "g", "f"]) @pytest.fixture(params=["dataframe", "series"]) def singleton_concat_obj(request, singleton_concat_index): if request.param == "dataframe": return pd.DataFrame( { "b": [1, 2, 3, 4], "d": [7, 8, 9, 10], "a": [4, 5, 6, 7], "c": [10, 11, 12, 13], }, index=singleton_concat_index, ) else: return pd.Series([4, 5, 5, 6], index=singleton_concat_index) @pytest.mark.parametrize("axis", [0, 1, "columns", "index"]) @pytest.mark.parametrize("sort", [False, True]) @pytest.mark.parametrize("ignore_index", [False, True]) def test_concat_singleton_sorting( axis, sort, ignore_index, singleton_concat_obj ): gobj = gd.from_pandas(singleton_concat_obj) gconcat = gd.concat( [gobj], axis=axis, sort=sort, ignore_index=ignore_index ) pconcat = pd.concat( [singleton_concat_obj], axis=axis, sort=sort, ignore_index=ignore_index ) assert_eq(pconcat, gconcat) @pytest.mark.parametrize("axis", [2, "invalid"]) def test_concat_invalid_axis(axis): s = gd.Series([1, 2, 3]) with pytest.raises(ValueError): gd.concat([s], axis=axis) @pytest.mark.parametrize( "s1,s2", [ ([1, 2], [[1, 2], [3, 4]]), ], ) def test_concat_mixed_list_types_error(s1, s2): s1, s2 = gd.Series(s1), gd.Series(s2) with pytest.raises(NotImplementedError): gd.concat([s1, s2], ignore_index=True)
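# Illustrative sketch (not part of the original test file): basic
# cudf.concat behaviour mirrored against pandas, as the tests above
# verify.  Assumes a CUDA-capable environment with cudf installed.
import cudf

a = cudf.DataFrame({"x": [1, 2]})
b = cudf.DataFrame({"x": [3, 4]})
# Row-wise concat keeps each input's index; ignore_index=True renumbers.
print(cudf.concat([a, b]).index.to_pandas().tolist())  # [0, 1, 0, 1]
print(
    cudf.concat([a, b], ignore_index=True).index.to_pandas().tolist()
)  # [0, 1, 2, 3]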
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_dask.py
# Copyright (c) 2019, NVIDIA CORPORATION.

import pytest

import cudf

is_dataframe_like = pytest.importorskip(
    "dask.dataframe.utils"
).is_dataframe_like
is_index_like = pytest.importorskip("dask.dataframe.utils").is_index_like
is_series_like = pytest.importorskip("dask.dataframe.utils").is_series_like


def test_is_dataframe_like():
    df = cudf.DataFrame({"x": [1, 2, 3]})
    assert is_dataframe_like(df)
    assert is_series_like(df.x)
    assert is_index_like(df.index)
    assert not is_dataframe_like(df.x)
    assert not is_series_like(df)
    assert not is_index_like(df)
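# Illustrative sketch (not part of the original test file): Dask's
# duck-typing helpers let the same code accept cudf and pandas objects,
# which is what the assertions above guarantee.  Assumes dask and cudf
# are installed alongside a CUDA-capable GPU.
import cudf
from dask.dataframe.utils import is_dataframe_like


def describe_if_frame(obj):
    # Works for pandas and cudf DataFrames without type-specific imports.
    return obj.describe() if is_dataframe_like(obj) else None


print(describe_if_frame(cudf.DataFrame({"x": [1, 2, 3]})))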
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_index.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. """ Test related to Index """ import operator import re import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf from cudf.api.extensions import no_default from cudf.api.types import is_bool_dtype from cudf.core._compat import PANDAS_GE_133, PANDAS_GE_200 from cudf.core.index import ( CategoricalIndex, DatetimeIndex, GenericIndex, RangeIndex, as_index, ) from cudf.testing._utils import ( ALL_TYPES, FLOAT_TYPES, NUMERIC_TYPES, OTHER_TYPES, SERIES_OR_INDEX_NAMES, SIGNED_INTEGER_TYPES, SIGNED_TYPES, UNSIGNED_TYPES, _create_cudf_series_float64_default, _create_pandas_series_float64_default, assert_column_memory_eq, assert_column_memory_ne, assert_eq, assert_exceptions_equal, expect_warning_if, ) from cudf.utils.utils import search_range def test_df_set_index_from_series(): df = cudf.DataFrame() df["a"] = list(range(10)) df["b"] = list(range(0, 20, 2)) # Check set_index(Series) df2 = df.set_index(df["b"]) assert list(df2.columns) == ["a", "b"] sliced_strided = df2.loc[2:6] assert len(sliced_strided) == 3 assert list(sliced_strided.index.values) == [2, 4, 6] def test_df_set_index_from_name(): df = cudf.DataFrame() df["a"] = list(range(10)) df["b"] = list(range(0, 20, 2)) # Check set_index(column_name) df2 = df.set_index("b") # 1 less column because 'b' is used as index assert list(df2.columns) == ["a"] sliced_strided = df2.loc[2:6] assert len(sliced_strided) == 3 assert list(sliced_strided.index.values) == [2, 4, 6] def test_df_slice_empty_index(): df = cudf.DataFrame() assert isinstance(df.index, RangeIndex) assert isinstance(df.index[:1], RangeIndex) with pytest.raises(IndexError): df.index[1] def test_index_find_label_range_genericindex(): # Monotonic Index idx = cudf.Index(np.asarray([4, 5, 6, 10])) assert idx.find_label_range(slice(4, 6)) == slice(0, 3, 1) assert idx.find_label_range(slice(5, 10)) == slice(1, 4, 1) assert idx.find_label_range(slice(0, 6)) == slice(0, 3, 1) assert idx.find_label_range(slice(4, 11)) == slice(0, 4, 1) # Non-monotonic Index idx_nm = cudf.Index(np.asarray([5, 4, 6, 10])) assert idx_nm.find_label_range(slice(4, 6)) == slice(1, 3, 1) assert idx_nm.find_label_range(slice(5, 10)) == slice(0, 4, 1) # Last value not found with pytest.raises(KeyError) as raises: idx_nm.find_label_range(slice(0, 6)) raises.match("not in index") # Last value not found with pytest.raises(KeyError) as raises: idx_nm.find_label_range(slice(4, 11)) raises.match("not in index") def test_index_find_label_range_rangeindex(): """Cudf specific""" # step > 0 # 3, 8, 13, 18 ridx = RangeIndex(3, 20, 5) assert ridx.find_label_range(slice(3, 8)) == slice(0, 2, 1) assert ridx.find_label_range(slice(0, 7)) == slice(0, 1, 1) assert ridx.find_label_range(slice(3, 19)) == slice(0, 4, 1) assert ridx.find_label_range(slice(2, 21)) == slice(0, 4, 1) # step < 0 # 20, 15, 10, 5 ridx = RangeIndex(20, 3, -5) assert ridx.find_label_range(slice(15, 10)) == slice(1, 3, 1) assert ridx.find_label_range(slice(10, 15, -1)) == slice(2, 0, -1) assert ridx.find_label_range(slice(10, 0)) == slice(2, 4, 1) assert ridx.find_label_range(slice(30, 13)) == slice(0, 2, 1) assert ridx.find_label_range(slice(30, 0)) == slice(0, 4, 1) def test_index_comparision(): start, stop = 10, 34 rg = cudf.RangeIndex(start, stop) gi = cudf.Index(np.arange(start, stop)) assert rg.equals(gi) assert gi.equals(rg) assert not rg[:-1].equals(gi) assert rg[:-1].equals(gi[:-1]) @pytest.mark.parametrize( "func", [ lambda x: x.min(), lambda x: x.max(), lambda x: x.sum(), lambda 
x: x.mean(), lambda x: x.any(), lambda x: x.all(), lambda x: x.prod(), ], ) def test_reductions(func): x = np.asarray([4, 5, 6, 10]) idx = cudf.Index(np.asarray([4, 5, 6, 10])) assert func(x) == func(idx) def test_name(): idx = cudf.Index(np.asarray([4, 5, 6, 10]), name="foo") assert idx.name == "foo" def test_index_immutable(): start, stop = 10, 34 rg = RangeIndex(start, stop) with pytest.raises(TypeError): rg[1] = 5 gi = cudf.Index(np.arange(start, stop)) with pytest.raises(TypeError): gi[1] = 5 def test_categorical_index(): pdf = pd.DataFrame() pdf["a"] = [1, 2, 3] pdf["index"] = pd.Categorical(["a", "b", "c"]) initial_df = cudf.from_pandas(pdf) pdf = pdf.set_index("index") gdf1 = cudf.from_pandas(pdf) gdf2 = cudf.DataFrame() gdf2["a"] = [1, 2, 3] gdf2["index"] = pd.Categorical(["a", "b", "c"]) assert_eq(initial_df.index, gdf2.index) gdf2 = gdf2.set_index("index") assert isinstance(gdf1.index, CategoricalIndex) assert_eq(pdf, gdf1) assert_eq(pdf.index, gdf1.index) assert_eq( pdf.index.codes, gdf1.index.codes.astype(pdf.index.codes.dtype).to_numpy(), ) assert isinstance(gdf2.index, CategoricalIndex) assert_eq(pdf, gdf2) assert_eq(pdf.index, gdf2.index) assert_eq( pdf.index.codes, gdf2.index.codes.astype(pdf.index.codes.dtype).to_numpy(), ) def test_pandas_as_index(): # Define Pandas Indexes pdf_int_index = pd.Index([1, 2, 3, 4, 5]) pdf_uint_index = pd.Index([1, 2, 3, 4, 5]) pdf_float_index = pd.Index([1.0, 2.0, 3.0, 4.0, 5.0]) pdf_datetime_index = pd.DatetimeIndex( [1000000, 2000000, 3000000, 4000000, 5000000] ) pdf_category_index = pd.CategoricalIndex(["a", "b", "c", "b", "a"]) # Define cudf Indexes gdf_int_index = as_index(pdf_int_index) gdf_uint_index = as_index(pdf_uint_index) gdf_float_index = as_index(pdf_float_index) gdf_datetime_index = as_index(pdf_datetime_index) gdf_category_index = as_index(pdf_category_index) # Check instance types assert isinstance(gdf_int_index, GenericIndex) assert isinstance(gdf_uint_index, GenericIndex) assert isinstance(gdf_float_index, GenericIndex) assert isinstance(gdf_datetime_index, DatetimeIndex) assert isinstance(gdf_category_index, CategoricalIndex) # Check equality assert_eq(pdf_int_index, gdf_int_index) assert_eq(pdf_uint_index, gdf_uint_index) assert_eq(pdf_float_index, gdf_float_index) assert_eq(pdf_datetime_index, gdf_datetime_index) assert_eq(pdf_category_index, gdf_category_index) assert_eq( pdf_category_index.codes, gdf_category_index.codes.astype( pdf_category_index.codes.dtype ).to_numpy(), ) @pytest.mark.parametrize("initial_name", SERIES_OR_INDEX_NAMES) @pytest.mark.parametrize("name", SERIES_OR_INDEX_NAMES) def test_index_rename(initial_name, name): pds = pd.Index([1, 2, 3], name=initial_name) gds = as_index(pds) assert_eq(pds, gds) expect = pds.rename(name) got = gds.rename(name) assert_eq(expect, got) """ From here on testing recursive creation and if name is being handles in recursive creation. 
""" pds = pd.Index(expect) gds = as_index(got) assert_eq(pds, gds) pds = pd.Index(pds, name="abc") gds = as_index(gds, name="abc") assert_eq(pds, gds) def test_index_rename_inplace(): pds = pd.Index([1, 2, 3], name="asdf") gds = as_index(pds) # inplace=False should yield a deep copy gds_renamed_deep = gds.rename("new_name", inplace=False) assert gds_renamed_deep._values.data_ptr != gds._values.data_ptr # inplace=True returns none expected_ptr = gds._values.data_ptr gds.rename("new_name", inplace=True) assert expected_ptr == gds._values.data_ptr def test_index_rename_preserves_arg(): idx1 = cudf.Index([1, 2, 3], name="orig_name") # this should be an entirely new object idx2 = idx1.rename("new_name", inplace=False) assert idx2.name == "new_name" assert idx1.name == "orig_name" # a new object but referencing the same data idx3 = as_index(idx1, name="last_name") assert idx3.name == "last_name" assert idx1.name == "orig_name" def test_set_index_as_property(): cdf = cudf.DataFrame() col1 = np.arange(10) col2 = np.arange(0, 20, 2) cdf["a"] = col1 cdf["b"] = col2 # Check set_index(Series) cdf.index = cdf["b"] assert_eq(cdf.index.to_numpy(), col2) with pytest.raises(ValueError): cdf.index = [list(range(10))] idx = pd.Index(np.arange(0, 1000, 100)) cdf.index = idx assert_eq(cdf.index.to_pandas(), idx) df = cdf.to_pandas() assert_eq(df.index, idx) head = cdf.head().to_pandas() assert_eq(head.index, idx[:5]) @pytest.mark.parametrize("name", ["x"]) @pytest.mark.parametrize("dtype", SIGNED_INTEGER_TYPES) def test_index_copy_range(name, dtype, deep=True): cidx = cudf.RangeIndex(1, 5) pidx = cidx.to_pandas() with pytest.warns(FutureWarning): pidx_copy = pidx.copy(name=name, deep=deep, dtype=dtype) with pytest.warns(FutureWarning): cidx_copy = cidx.copy(name=name, deep=deep, dtype=dtype) assert_eq(pidx_copy, cidx_copy) @pytest.mark.parametrize("name", ["x"]) @pytest.mark.parametrize("dtype,", ["datetime64[ns]", "int64"]) def test_index_copy_datetime(name, dtype, deep=True): cidx = cudf.DatetimeIndex(["2001", "2002", "2003"]) pidx = cidx.to_pandas() with pytest.warns(FutureWarning): pidx_copy = pidx.copy(name=name, deep=deep, dtype=dtype) with pytest.warns(FutureWarning): cidx_copy = cidx.copy(name=name, deep=deep, dtype=dtype) assert_eq(pidx_copy, cidx_copy) @pytest.mark.parametrize("name", ["x"]) @pytest.mark.parametrize("dtype", ["category", "object"]) def test_index_copy_string(name, dtype, deep=True): cidx = cudf.Index(["a", "b", "c"]) pidx = cidx.to_pandas() with pytest.warns(FutureWarning): pidx_copy = pidx.copy(name=name, deep=deep, dtype=dtype) with pytest.warns(FutureWarning): cidx_copy = cidx.copy(name=name, deep=deep, dtype=dtype) assert_eq(pidx_copy, cidx_copy) @pytest.mark.parametrize("name", ["x"]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + ["datetime64[ns]", "timedelta64[ns]"] + OTHER_TYPES, ) def test_index_copy_integer(name, dtype, deep=True): """Test for NumericIndex Copy Casts""" cidx = cudf.Index([1, 2, 3]) pidx = cidx.to_pandas() with pytest.warns(FutureWarning): pidx_copy = pidx.copy(name=name, deep=deep, dtype=dtype) with pytest.warns(FutureWarning): cidx_copy = cidx.copy(name=name, deep=deep, dtype=dtype) assert_eq(pidx_copy, cidx_copy) @pytest.mark.parametrize("name", ["x"]) @pytest.mark.parametrize("dtype", SIGNED_TYPES) def test_index_copy_float(name, dtype, deep=True): """Test for NumericIndex Copy Casts""" cidx = cudf.Index([1.0, 2.0, 3.0]) pidx = cidx.to_pandas() with pytest.warns(FutureWarning): pidx_copy = pidx.copy(name=name, deep=deep, dtype=dtype) with 
pytest.warns(FutureWarning): cidx_copy = cidx.copy(name=name, deep=deep, dtype=dtype) assert_eq(pidx_copy, cidx_copy) @pytest.mark.parametrize("name", ["x"]) @pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["category"]) def test_index_copy_category(name, dtype, deep=True): cidx = cudf.core.index.CategoricalIndex([1, 2, 3]) pidx = cidx.to_pandas() with pytest.warns(FutureWarning): pidx_copy = pidx.copy(name=name, deep=deep, dtype=dtype) with pytest.warns(FutureWarning): cidx_copy = cidx.copy(name=name, deep=deep, dtype=dtype) assert_column_memory_ne(cidx._values, cidx_copy._values) assert_eq(pidx_copy, cidx_copy) @pytest.mark.parametrize("deep", [True, False]) @pytest.mark.parametrize( "idx", [ cudf.DatetimeIndex(["2001", "2002", "2003"]), cudf.Index(["a", "b", "c"]), cudf.Index([1, 2, 3]), cudf.Index([1.0, 2.0, 3.0]), cudf.CategoricalIndex([1, 2, 3]), cudf.CategoricalIndex(["a", "b", "c"]), ], ) @pytest.mark.parametrize("copy_on_write", [True, False]) def test_index_copy_deep(idx, deep, copy_on_write): """Test if deep copy creates a new instance for device data.""" idx_copy = idx.copy(deep=deep) original_cow_setting = cudf.get_option("copy_on_write") cudf.set_option("copy_on_write", copy_on_write) if ( isinstance(idx, cudf.StringIndex) or not deep or (cudf.get_option("copy_on_write") and not deep) ): # StringColumn is immutable hence, deep copies of a # StringIndex will share the same StringColumn. # When `copy_on_write` is turned on, Index objects will # have unique column object but they all point to same # data pointers. assert_column_memory_eq(idx._values, idx_copy._values) else: assert_column_memory_ne(idx._values, idx_copy._values) cudf.set_option("copy_on_write", original_cow_setting) @pytest.mark.parametrize("idx", [[1, None, 3, None, 5]]) def test_index_isna(idx): pidx = pd.Index(idx, name="idx") gidx = cudf.Index(idx, name="idx") assert_eq(gidx.isna(), pidx.isna()) @pytest.mark.parametrize("idx", [[1, None, 3, None, 5]]) def test_index_notna(idx): pidx = pd.Index(idx, name="idx") gidx = cudf.Index(idx, name="idx") assert_eq(gidx.notna(), pidx.notna()) def test_rangeindex_slice_attr_name(): start, stop = 0, 10 rg = RangeIndex(start, stop, name="myindex") sliced_rg = rg[0:9] assert_eq(rg.name, sliced_rg.name) def test_from_pandas_str(): idx = ["a", "b", "c"] pidx = pd.Index(idx, name="idx") gidx_1 = cudf.Index(idx, name="idx") gidx_2 = cudf.from_pandas(pidx) assert_eq(gidx_1, gidx_2) def test_from_pandas_gen(): idx = [2, 4, 6] pidx = pd.Index(idx, name="idx") gidx_1 = cudf.Index(idx, name="idx") gidx_2 = cudf.from_pandas(pidx) assert_eq(gidx_1, gidx_2) def test_index_names(): idx = cudf.core.index.as_index([1, 2, 3], name="idx") assert idx.names == ("idx",) @pytest.mark.parametrize( "data", [ range(0), range(1), range(0, 1), range(0, 5), range(1, 10), range(1, 10, 1), range(1, 10, 3), range(10, 1, -3), range(-5, 10), ], ) def test_range_index_from_range(data): assert_eq(pd.Index(data), cudf.Index(data)) @pytest.mark.parametrize( "n", [-10, -5, -2, 0, 1, 0, 2, 5, 10], ) def test_empty_df_head_tail_index(n): df = cudf.DataFrame() pdf = pd.DataFrame() assert_eq(df.head(n).index.values, pdf.head(n).index.values) assert_eq(df.tail(n).index.values, pdf.tail(n).index.values) df = cudf.DataFrame({"a": [11, 2, 33, 44, 55]}) pdf = pd.DataFrame({"a": [11, 2, 33, 44, 55]}) assert_eq(df.head(n).index.values, pdf.head(n).index.values) assert_eq(df.tail(n).index.values, pdf.tail(n).index.values) df = cudf.DataFrame(index=[1, 2, 3]) pdf = pd.DataFrame(index=[1, 2, 3]) 
assert_eq(df.head(n).index.values, pdf.head(n).index.values) assert_eq(df.tail(n).index.values, pdf.tail(n).index.values) @pytest.mark.parametrize( "data,condition,other,error", [ (pd.Index(range(5)), pd.Index(range(5)) > 0, None, None), (pd.Index([1, 2, 3]), pd.Index([1, 2, 3]) != 2, None, None), (pd.Index(list("abc")), pd.Index(list("abc")) == "c", None, None), ( pd.Index(list("abc")), pd.Index(list("abc")) == "c", pd.Index(list("xyz")), None, ), (pd.Index(range(5)), pd.Index(range(4)) > 0, None, ValueError), pytest.param( pd.Index(range(5)), pd.Index(range(5)) > 1, 10, None, marks=pytest.mark.xfail( condition=not PANDAS_GE_133, reason="https://github.com/pandas-dev/pandas/issues/43240", ), ), ( pd.Index(np.arange(10)), (pd.Index(np.arange(10)) % 3) == 0, -pd.Index(np.arange(10)), None, ), ( pd.Index([1, 2, np.nan]), pd.Index([1, 2, np.nan]) == 4, None, None, ), ( pd.Index([1, 2, np.nan]), pd.Index([1, 2, np.nan]) != 4, None, None, ), ( pd.Index([-2, 3, -4, -79]), [True, True, True], None, ValueError, ), ( pd.Index([-2, 3, -4, -79]), [True, True, True, False], None, None, ), ( pd.Index([-2, 3, -4, -79]), [True, True, True, False], 17, None, ), (pd.Index(list("abcdgh")), pd.Index(list("abcdgh")) != "g", "3", None), ( pd.Index(list("abcdgh")), pd.Index(list("abcdg")) != "g", "3", ValueError, ), ( pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"]), pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"]) != "a", "a", None, ), ( pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"]), pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"]) != "a", "b", None, ), ( pd.MultiIndex.from_tuples( list( zip( *[ [ "bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux", ], [ "one", "two", "one", "two", "one", "two", "one", "two", ], ] ) ) ), pd.MultiIndex.from_tuples( list( zip( *[ [ "bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux", ], [ "one", "two", "one", "two", "one", "two", "one", "two", ], ] ) ) ) != "a", None, NotImplementedError, ), ], ) def test_index_where(data, condition, other, error): ps = data gs = cudf.from_pandas(data) ps_condition = condition if type(condition).__module__.split(".")[0] == "pandas": gs_condition = cudf.from_pandas(condition) else: gs_condition = condition ps_other = other if type(other).__module__.split(".")[0] == "pandas": gs_other = cudf.from_pandas(other) else: gs_other = other if error is None: if pd.api.types.is_categorical_dtype(ps): expect = ps.where(ps_condition, other=ps_other) got = gs.where(gs_condition, other=gs_other) np.testing.assert_array_equal( expect.codes, got.codes.astype(expect.codes.dtype).fillna(-1).to_numpy(), ) assert_eq(expect.categories, got.categories) else: assert_eq( ps.where(ps_condition, other=ps_other), gs.where(gs_condition, other=gs_other).to_pandas(), ) else: assert_exceptions_equal( lfunc=ps.where, rfunc=gs.where, lfunc_args_and_kwargs=([ps_condition], {"other": ps_other}), rfunc_args_and_kwargs=([gs_condition], {"other": gs_other}), ) @pytest.mark.parametrize("dtype", NUMERIC_TYPES + OTHER_TYPES) @pytest.mark.parametrize("copy", [True, False]) def test_index_astype(dtype, copy): pdi = pd.Index([1, 2, 3]) gdi = cudf.from_pandas(pdi) actual = gdi.astype(dtype=dtype, copy=copy) expected = pdi.astype(dtype=dtype, copy=copy) assert_eq(expected, actual) assert_eq(pdi, gdi) @pytest.mark.parametrize( "data", [ [1, 10, 2, 100, -10], ["z", "x", "a", "c", "b"], [-10.2, 100.1, -100.2, 0.0, 0.23], ], ) def test_index_argsort(data): pdi = pd.Index(data) gdi = cudf.from_pandas(pdi) assert_eq(pdi.argsort(), gdi.argsort()) @pytest.mark.parametrize( 
"data", [ pd.Index([1, 10, 2, 100, -10], name="abc"), pd.Index(["z", "x", "a", "c", "b"]), pd.Index(["z", "x", "a", "c", "b"], dtype="category"), pd.Index( [-10.2, 100.1, -100.2, 0.0, 0.23], name="this is a float index" ), pd.Index([102, 1001, 1002, 0.0, 23], dtype="datetime64[ns]"), pd.Index([13240.2, 1001, 100.2, 0.0, 23], dtype="datetime64[ns]"), pd.RangeIndex(0, 10, 1), pd.RangeIndex(0, -100, -2), pd.Index([-10.2, 100.1, -100.2, 0.0, 23], dtype="timedelta64[ns]"), ], ) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("return_indexer", [True, False]) def test_index_sort_values(data, ascending, return_indexer): pdi = data gdi = cudf.from_pandas(pdi) expected = pdi.sort_values( ascending=ascending, return_indexer=return_indexer ) actual = gdi.sort_values( ascending=ascending, return_indexer=return_indexer ) if return_indexer: expected_indexer = expected[1] actual_indexer = actual[1] assert_eq(expected_indexer, actual_indexer) expected = expected[0] actual = actual[0] assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1, 10, 2, 100, -10], ["z", "x", "a", "c", "b"], [-10.2, 100.1, -100.2, 0.0, 0.23], ], ) def test_index_to_series(data): pdi = pd.Index(data) gdi = cudf.from_pandas(pdi) assert_eq(pdi.to_series(), gdi.to_series()) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4, 5, 6], [4, 5, 6, 10, 20, 30], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], ["5", "6", "2", "a", "b", "c"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [1.0, 5.0, 6.0, 0.0, 1.3], ["ab", "cd", "ef"], pd.Series(["1", "2", "a", "3", None], dtype="category"), range(0, 10), [], ], ) @pytest.mark.parametrize( "other", [ [1, 2, 3, 4, 5, 6], [4, 5, 6, 10, 20, 30], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], ["5", "6", "2", "a", "b", "c"], ["ab", "ef", None], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [1.0, 5.0, 6.0, 0.0, 1.3], range(2, 4), pd.Series(["1", "a", "3", None], dtype="category"), [], ], ) @pytest.mark.parametrize("sort", [None, False]) @pytest.mark.parametrize( "name_data,name_other", [("abc", "c"), (None, "abc"), ("abc", pd.NA), ("abc", "abc")], ) def test_index_difference(data, other, sort, name_data, name_other): pd_data = pd.Index(data, name=name_data) pd_other = pd.Index(other, name=name_other) gd_data = cudf.from_pandas(pd_data) gd_other = cudf.from_pandas(pd_other) expected = pd_data.difference(pd_other, sort=sort) actual = gd_data.difference(gd_other, sort=sort) assert_eq(expected, actual) @pytest.mark.parametrize("other", ["a", 1, None]) def test_index_difference_invalid_inputs(other): pdi = pd.Index([1, 2, 3]) gdi = cudf.Index([1, 2, 3]) assert_exceptions_equal( pdi.difference, gdi.difference, ([other], {}), ([other], {}), ) def test_index_difference_sort_error(): pdi = pd.Index([1, 2, 3]) gdi = cudf.Index([1, 2, 3]) assert_exceptions_equal( pdi.difference, gdi.difference, ([pdi], {"sort": True}), ([gdi], {"sort": True}), ) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], ["a"], ["b", "c", "d"], [1], [2, 3, 4], [], [10.0], [1100.112, 2323.2322, 2323.2322], ["abcd", "defgh", "werty", "poiu"], ], ) @pytest.mark.parametrize( "other", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], ["a"], [], ["b", "c", "d"], [1], [2, 3, 4], [10.0], [1100.112, 2323.2322, 2323.2322], ["abcd", "defgh", "werty", "poiu"], ], ) def test_index_equals(data, other): pd_data = pd.Index(data) pd_other = pd.Index(other) gd_data = 
cudf.core.index.as_index(data) gd_other = cudf.core.index.as_index(other) expected = pd_data.equals(pd_other) actual = gd_data.equals(gd_other) assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], ["a"], ["b", "c", "d"], [1], [2, 3, 4], [], [10.0], [1100.112, 2323.2322, 2323.2322], ["abcd", "defgh", "werty", "poiu"], ], ) @pytest.mark.parametrize( "other", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], ["a"], ["b", "c", "d"], [1], [2, 3, 4], [], [10.0], [1100.112, 2323.2322, 2323.2322], ["abcd", "defgh", "werty", "poiu"], ], ) def test_index_categories_equal(data, other): pd_data = pd.Index(data).astype("category") pd_other = pd.Index(other) gd_data = cudf.core.index.as_index(data).astype("category") gd_other = cudf.core.index.as_index(other) expected = pd_data.equals(pd_other) actual = gd_data.equals(gd_other) assert_eq(expected, actual) expected = pd_other.equals(pd_data) actual = gd_other.equals(gd_data) assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], ["a"], ["b", "c", "d"], [1], [2, 3, 4], [], [10.0], [1100.112, 2323.2322, 2323.2322], ["abcd", "defgh", "werty", "poiu"], ], ) @pytest.mark.parametrize( "other", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], ["a"], ["b", "c", "d"], [1], [2, 3, 4], [], [10.0], [1100.112, 2323.2322, 2323.2322], ["abcd", "defgh", "werty", "poiu"], ], ) def test_index_equal_misc(data, other): pd_data = pd.Index(data) pd_other = other gd_data = cudf.core.index.as_index(data) gd_other = other expected = pd_data.equals(pd_other) actual = gd_data.equals(gd_other) assert_eq(expected, actual) expected = pd_data.equals(np.array(pd_other)) actual = gd_data.equals(np.array(gd_other)) assert_eq(expected, actual) expected = pd_data.equals(_create_pandas_series_float64_default(pd_other)) actual = gd_data.equals(_create_cudf_series_float64_default(gd_other)) assert_eq(expected, actual) expected = pd_data.astype("category").equals(pd_other) actual = gd_data.astype("category").equals(gd_other) assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], ["a"], ["b", "c", "d"], [1], [2, 3, 4], [], [10.0], [1100.112, 2323.2322, 2323.2322], ["abcd", "defgh", "werty", "poiu"], ], ) @pytest.mark.parametrize( "other", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], ["1", "2", "3", "4", "5", "6"], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], ["a"], ["b", "c", "d"], [1], [2, 3, 4], [], [10.0], [1100.112, 2323.2322, 2323.2322], ["abcd", "defgh", "werty", "poiu"], ], ) def test_index_append(data, other): pd_data = pd.Index(data) pd_other = pd.Index(other) gd_data = cudf.core.index.as_index(data) gd_other = cudf.core.index.as_index(other) if cudf.utils.dtypes.is_mixed_with_object_dtype(gd_data, gd_other): gd_data = gd_data.astype("str") gd_other = gd_other.astype("str") expected = pd_data.append(pd_other) actual = gd_data.append(gd_other) if len(data) == 0 and len(other) == 0: # Pandas default dtype to "object" for empty list # cudf default dtype to "float" for empty list assert_eq(expected, actual.astype("str")) elif actual.dtype == "object": assert_eq(expected.astype("str"), actual) else: assert_eq(expected, 
actual) def test_index_empty_append_name_conflict(): empty = cudf.Index([], name="foo") non_empty = cudf.Index([1], name="bar") expected = cudf.Index([1]) result = non_empty.append(empty) assert_eq(result, expected) result = empty.append(non_empty) assert_eq(result, expected) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4, 5, 6], [10, 20, 30, 40, 50, 60], [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [1], [2, 3, 4], [10.0], [1100.112, 2323.2322, 2323.2322], ], ) @pytest.mark.parametrize( "other", [ ["1", "2", "3", "4", "5", "6"], ["a"], ["b", "c", "d"], ["abcd", "defgh", "werty", "poiu"], ], ) def test_index_append_error(data, other): gd_data = cudf.core.index.as_index(data) gd_other = cudf.core.index.as_index(other) got_dtype = ( gd_other.dtype if gd_data.dtype == np.dtype("object") else gd_data.dtype ) with pytest.raises( TypeError, match=re.escape( f"cudf does not support appending an Index of " f"dtype `{np.dtype('object')}` with an Index " f"of dtype `{got_dtype}`, please type-cast " f"either one of them to same dtypes." ), ): gd_data.append(gd_other) with pytest.raises( TypeError, match=re.escape( f"cudf does not support appending an Index of " f"dtype `{np.dtype('object')}` with an Index " f"of dtype `{got_dtype}`, please type-cast " f"either one of them to same dtypes." ), ): gd_other.append(gd_data) sr = gd_other.to_series() assert_exceptions_equal( lfunc=gd_data.to_pandas().append, rfunc=gd_data.append, lfunc_args_and_kwargs=([[sr.to_pandas()]],), rfunc_args_and_kwargs=([[sr]],), ) @pytest.mark.parametrize( "data,other", [ ( pd.Index([1, 2, 3, 4, 5, 6]), [ pd.Index([1, 2, 3, 4, 5, 6]), pd.Index([1, 2, 3, 4, 5, 6, 10]), pd.Index([]), ], ), ( pd.Index([]), [ pd.Index([1, 2, 3, 4, 5, 6]), pd.Index([1, 2, 3, 4, 5, 6, 10]), pd.Index([1, 4, 5, 6]), ], ), ( pd.Index([10, 20, 30, 40, 50, 60]), [ pd.Index([10, 20, 30, 40, 50, 60]), pd.Index([10, 20, 30]), pd.Index([40, 50, 60]), pd.Index([10, 60]), pd.Index([60]), ], ), ( pd.Index([]), [ pd.Index([10, 20, 30, 40, 50, 60]), pd.Index([10, 20, 30]), pd.Index([40, 50, 60]), pd.Index([10, 60]), pd.Index([60]), ], ), ( pd.Index(["1", "2", "3", "4", "5", "6"]), [ pd.Index(["1", "2", "3", "4", "5", "6"]), pd.Index(["1", "2", "3"]), pd.Index(["6"]), pd.Index(["1", "6"]), ], ), ( pd.Index([]), [ pd.Index(["1", "2", "3", "4", "5", "6"]), pd.Index(["1", "2", "3"]), pd.Index(["6"]), pd.Index(["1", "6"]), ], ), ( pd.Index([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), [ pd.Index([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), pd.Index([1.0, 6.0]), pd.Index([]), pd.Index([6.0]), ], ), ( pd.Index([]), [ pd.Index([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), pd.Index([1.0, 6.0]), pd.Index([1.0, 2.0, 6.0]), pd.Index([6.0]), ], ), ( pd.Index(["a"]), [ pd.Index(["a"]), pd.Index(["a", "b", "c"]), pd.Index(["c"]), pd.Index(["d"]), pd.Index(["ae", "hello", "world"]), ], ), ( pd.Index([]), [ pd.Index(["a"]), pd.Index(["a", "b", "c"]), pd.Index(["c"]), pd.Index(["d"]), pd.Index(["ae", "hello", "world"]), pd.Index([]), ], ), ], ) def test_index_append_list(data, other): pd_data = data pd_other = other gd_data = cudf.from_pandas(data) gd_other = [cudf.from_pandas(i) for i in other] expected = pd_data.append(pd_other) actual = gd_data.append(gd_other) assert_eq(expected, actual) @pytest.mark.parametrize("data", [[1, 2, 3, 4], []]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + ["str", "category", "datetime64[ns]"] ) @pytest.mark.parametrize("name", [1, "a", None]) def test_index_basic(data, dtype, name): pdi = pd.Index(data, dtype=dtype, name=name) gdi = cudf.Index(data, dtype=dtype, name=name) assert_eq(pdi, gdi) 
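

# Illustrative sketch, not part of the test suite: `_demo_index_append_with_cast`
# is a hypothetical helper (the leading underscore keeps pytest from collecting
# it). It shows the casting pattern the append tests above rely on -- cudf
# rejects appending an object-dtype Index to a numeric one with a TypeError,
# so both sides are cast to a common dtype first.
def _demo_index_append_with_cast():
    numeric = cudf.Index([1, 2, 3])
    strings = cudf.Index(["4", "5"])
    # Cast the numeric side to strings so both Indexes share a dtype.
    combined = numeric.astype("str").append(strings)
    assert combined.to_arrow().to_pylist() == ["1", "2", "3", "4", "5"]
    return combined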
@pytest.mark.parametrize("data", [[1, 2, 3, 4], []]) @pytest.mark.parametrize("name", [1, "a", None]) @pytest.mark.parametrize("dtype", SIGNED_INTEGER_TYPES) def test_integer_index_apis(data, name, dtype): with pytest.warns(FutureWarning): pindex = pd.Int64Index(data, dtype=dtype, name=name) # Int8Index with pytest.warns(FutureWarning): gindex = cudf.Int8Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("int8") # Int16Index with pytest.warns(FutureWarning): gindex = cudf.Int16Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("int16") # Int32Index with pytest.warns(FutureWarning): gindex = cudf.Int32Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("int32") # Int64Index with pytest.warns(FutureWarning): gindex = cudf.Int64Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("int64") @pytest.mark.parametrize("data", [[1, 2, 3, 4], []]) @pytest.mark.parametrize("name", [1, "a", None]) @pytest.mark.parametrize("dtype", UNSIGNED_TYPES) def test_unsigned_integer_index_apis(data, name, dtype): with pytest.warns(FutureWarning): pindex = pd.UInt64Index(data, dtype=dtype, name=name) # UInt8Index with pytest.warns(FutureWarning): gindex = cudf.UInt8Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("uint8") # UInt16Index with pytest.warns(FutureWarning): gindex = cudf.UInt16Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("uint16") # UInt32Index with pytest.warns(FutureWarning): gindex = cudf.UInt32Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("uint32") # UInt64Index with pytest.warns(FutureWarning): gindex = cudf.UInt64Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("uint64") @pytest.mark.parametrize("data", [[1, 2, 3, 4], []]) @pytest.mark.parametrize("name", [1, "a", None]) @pytest.mark.parametrize("dtype", FLOAT_TYPES) def test_float_index_apis(data, name, dtype): with pytest.warns(FutureWarning): pindex = pd.Float64Index(data, dtype=dtype, name=name) # Float32Index with pytest.warns(FutureWarning): gindex = cudf.Float32Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("float32") # Float64Index with pytest.warns(FutureWarning): gindex = cudf.Float64Index(data, dtype=dtype, name=name) assert_eq(pindex, gindex) assert gindex.dtype == np.dtype("float64") @pytest.mark.parametrize("data", [[1, 2, 3, 4], []]) @pytest.mark.parametrize("categories", [[1, 2], None]) @pytest.mark.parametrize( "dtype", [ pd.CategoricalDtype([1, 2, 3], ordered=True), pd.CategoricalDtype([1, 2, 3], ordered=False), None, ], ) @pytest.mark.parametrize("ordered", [True, False]) @pytest.mark.parametrize("name", [1, "a", None]) def test_categorical_index_basic(data, categories, dtype, ordered, name): # can't have both dtype and categories/ordered if dtype is not None: categories = None ordered = None pindex = pd.CategoricalIndex( data=data, categories=categories, dtype=dtype, ordered=ordered, name=name, ) gindex = CategoricalIndex( data=data, categories=categories, dtype=dtype, ordered=ordered, name=name, ) assert_eq(pindex, gindex) @pytest.mark.parametrize( "data", [ pd.MultiIndex.from_arrays( [[1, 1, 2, 2], ["red", "blue", "red", "blue"]], names=("number", "color"), ), pd.MultiIndex.from_arrays( [[1, 2, 3, 4], ["yellow", "violet", 
"pink", "white"]], names=("number1", "color2"), ), ], ) @pytest.mark.parametrize( "other", [ pd.MultiIndex.from_arrays( [[1, 1, 2, 2], ["red", "blue", "red", "blue"]], names=("number", "color"), ), pd.MultiIndex.from_arrays( [[1, 2, 3, 4], ["yellow", "violet", "pink", "white"]], names=("number1", "color2"), ), ], ) def test_multiindex_append(data, other): pdi = data other_pd = other gdi = cudf.from_pandas(data) other_gd = cudf.from_pandas(other) expected = pdi.append(other_pd) actual = gdi.append(other_gd) assert_eq(expected, actual) @pytest.mark.parametrize("data", [[1, 2, 3, 4], []]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + ["str", "category", "datetime64[ns]"] ) def test_index_empty(data, dtype): pdi = pd.Index(data, dtype=dtype) gdi = cudf.Index(data, dtype=dtype) assert_eq(pdi.empty, gdi.empty) @pytest.mark.parametrize("data", [[1, 2, 3, 4], []]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + ["str", "category", "datetime64[ns]"] ) def test_index_size(data, dtype): pdi = pd.Index(data, dtype=dtype) gdi = cudf.Index(data, dtype=dtype) assert_eq(pdi.size, gdi.size) @pytest.mark.parametrize("data", [[1, 2, 3, 1, 2, 3, 4], [], [1], [1, 2, 3]]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + ["str", "category", "datetime64[ns]"] ) def test_index_drop_duplicates(data, dtype): pdi = pd.Index(data, dtype=dtype) gdi = cudf.Index(data, dtype=dtype) assert_eq(pdi.drop_duplicates(), gdi.drop_duplicates()) def test_dropna_bad_how(): with pytest.raises(ValueError): cudf.Index([1]).dropna(how="foo") @pytest.mark.parametrize("data", [[1, 2, 3, 1, 2, 3, 4], []]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + ["str", "category", "datetime64[ns]"] ) def test_index_tolist(data, dtype): gdi = cudf.Index(data, dtype=dtype) with pytest.raises( TypeError, match=re.escape( r"cuDF does not support conversion to host memory " r"via the `tolist()` method. Consider using " r"`.to_arrow().to_pylist()` to construct a Python list." ), ): gdi.tolist() @pytest.mark.parametrize("data", [[], [1], [1, 2, 3]]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + ["str", "category", "datetime64[ns]"] ) def test_index_iter_error(data, dtype): gdi = cudf.Index(data, dtype=dtype) with pytest.raises( TypeError, match=re.escape( f"{gdi.__class__.__name__} object is not iterable. " f"Consider using `.to_arrow()`, `.to_pandas()` or `.values_host` " f"if you wish to iterate over the values." 
), ): iter(gdi) @pytest.mark.parametrize("data", [[], [1], [1, 2, 3, 4, 5]]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + ["str", "category", "datetime64[ns]"] ) def test_index_values_host(data, dtype): gdi = cudf.Index(data, dtype=dtype) pdi = pd.Index(data, dtype=dtype) np.testing.assert_array_equal(gdi.values_host, pdi.values) @pytest.mark.parametrize( "data,fill_value", [ ([1, 2, 3, 1, None, None], 1), ([None, None, 3.2, 1, None, None], 10.0), ([None, "a", "3.2", "z", None, None], "helloworld"), (pd.Series(["a", "b", None], dtype="category"), "b"), (pd.Series([None, None, 1.0], dtype="category"), 1.0), ( np.array([1, 2, 3, None], dtype="datetime64[s]"), np.datetime64("2005-02-25"), ), ( np.array( [None, None, 122, 3242234, None, 6237846], dtype="datetime64[ms]", ), np.datetime64("2005-02-25"), ), ], ) def test_index_fillna(data, fill_value): pdi = pd.Index(data) gdi = cudf.Index(data) assert_eq( pdi.fillna(fill_value), gdi.fillna(fill_value), exact=False ) # Int64Index v/s Float64Index @pytest.mark.parametrize( "data", [ [1, 2, 3, 1, None, None], [None, None, 3.2, 1, None, None], [None, "a", "3.2", "z", None, None], pd.Series(["a", "b", None], dtype="category"), np.array([1, 2, 3, None], dtype="datetime64[s]"), ], ) def test_index_to_arrow(data): pdi = pd.Index(data) gdi = cudf.Index(data) expected_arrow_array = pa.Array.from_pandas(pdi) got_arrow_array = gdi.to_arrow() assert_eq(expected_arrow_array, got_arrow_array) @pytest.mark.parametrize( "data", [ [None, None, 3.2, 1, None, None], [None, "a", "3.2", "z", None, None], pd.Series(["a", "b", None], dtype="category"), np.array([1, 2, 3, None], dtype="datetime64[s]"), ], ) def test_index_from_arrow(data): pdi = pd.Index(data) arrow_array = pa.Array.from_pandas(pdi) expected_index = pd.Index(arrow_array.to_pandas()) gdi = cudf.Index.from_arrow(arrow_array) assert_eq(expected_index, gdi) def test_multiindex_to_arrow(): pdf = pd.DataFrame( { "a": [1, 2, 1, 2, 3], "b": [1.0, 2.0, 3.0, 4.0, 5.0], "c": np.array([1, 2, 3, None, 5], dtype="datetime64[s]"), "d": ["a", "b", "c", "d", "e"], } ) pdf["a"] = pdf["a"].astype("category") df = cudf.from_pandas(pdf) gdi = cudf.MultiIndex.from_frame(df) expected = pa.Table.from_pandas(pdf) got = gdi.to_arrow() assert_eq(expected, got) def test_multiindex_from_arrow(): pdf = pd.DataFrame( { "a": [1, 2, 1, 2, 3], "b": [1.0, 2.0, 3.0, 4.0, 5.0], "c": np.array([1, 2, 3, None, 5], dtype="datetime64[s]"), "d": ["a", "b", "c", "d", "e"], } ) pdf["a"] = pdf["a"].astype("category") ptb = pa.Table.from_pandas(pdf) gdi = cudf.MultiIndex.from_arrow(ptb) pdi = pd.MultiIndex.from_frame(pdf) assert_eq(pdi, gdi) def test_index_equals_categories(): lhs = cudf.CategoricalIndex( ["a", "b", "c", "b", "a"], categories=["a", "b", "c"] ) rhs = cudf.CategoricalIndex( ["a", "b", "c", "b", "a"], categories=["a", "b", "c", "_"] ) got = lhs.equals(rhs) expect = lhs.to_pandas().equals(rhs.to_pandas()) assert_eq(expect, got) def test_rangeindex_arg_validation(): with pytest.raises(TypeError): RangeIndex("1") with pytest.raises(TypeError): RangeIndex(1, "2") with pytest.raises(TypeError): RangeIndex(1, 3, "1") with pytest.raises(ValueError): RangeIndex(1, dtype="float64") with pytest.raises(ValueError): RangeIndex(1, dtype="uint64") def test_rangeindex_name_not_hashable(): with pytest.raises(ValueError): RangeIndex(range(2), name=["foo"]) with pytest.raises(ValueError): RangeIndex(range(2)).copy(name=["foo"]) def test_index_rangeindex_search_range(): # step > 0 ridx = RangeIndex(-13, 17, 4) ri = ridx.as_range for i in 
range(len(ridx)): assert i == search_range(ridx[i], ri, side="left") assert i + 1 == search_range(ridx[i], ri, side="right") @pytest.mark.parametrize( "rge", [(1, 10, 1), (1, 10, 3), (10, -17, -1), (10, -17, -3)], ) def test_index_rangeindex_get_item_basic(rge): pridx = pd.RangeIndex(*rge) gridx = cudf.RangeIndex(*rge) for i in range(-len(pridx), len(pridx)): assert pridx[i] == gridx[i] @pytest.mark.parametrize( "rge", [(1, 10, 3), (10, 1, -3)], ) def test_index_rangeindex_get_item_out_of_bounds(rge): gridx = cudf.RangeIndex(*rge) with pytest.raises(IndexError): _ = gridx[4] @pytest.mark.parametrize( "rge", [(10, 1, 1), (-17, 10, -3)], ) def test_index_rangeindex_get_item_null_range(rge): gridx = cudf.RangeIndex(*rge) with pytest.raises(IndexError): gridx[0] @pytest.mark.parametrize( "rge", [(-17, 21, 2), (21, -17, -3), (0, 0, 1), (0, 1, -3), (10, 0, 5)] ) @pytest.mark.parametrize( "sl", [ slice(1, 7, 1), slice(1, 7, 2), slice(-1, 7, 1), slice(-1, 7, 2), slice(-3, 7, 2), slice(7, 1, -2), slice(7, -3, -2), slice(None, None, 1), slice(0, None, 2), slice(0, None, 3), slice(0, 0, 3), ], ) def test_index_rangeindex_get_item_slices(rge, sl): pridx = pd.RangeIndex(*rge) gridx = cudf.RangeIndex(*rge) assert_eq(pridx[sl], gridx[sl]) @pytest.mark.parametrize( "idx", [ pd.Index([1, 2, 3]), pd.Index(["abc", "def", "ghi"]), pd.RangeIndex(0, 10, 1), pd.Index([0.324, 0.234, 1.3], name="abc"), ], ) @pytest.mark.parametrize("names", [None, "a", "new name", ["another name"]]) @pytest.mark.parametrize("inplace", [True, False]) def test_index_set_names(idx, names, inplace): pi = idx.copy() gi = cudf.from_pandas(idx) expected = pi.set_names(names=names, inplace=inplace) actual = gi.set_names(names=names, inplace=inplace) if inplace: expected, actual = pi, gi assert_eq(expected, actual) @pytest.mark.parametrize("idx", [pd.Index([1, 2, 3], name="abc")]) @pytest.mark.parametrize("level", [1, [0], "abc"]) @pytest.mark.parametrize("names", [None, "a"]) def test_index_set_names_error(idx, level, names): pi = idx.copy() gi = cudf.from_pandas(idx) assert_exceptions_equal( lfunc=pi.set_names, rfunc=gi.set_names, lfunc_args_and_kwargs=([], {"names": names, "level": level}), rfunc_args_and_kwargs=([], {"names": names, "level": level}), ) @pytest.mark.parametrize( "idx", [pd.Index([1, 3, 6]), pd.Index([6, 1, 3])], # monotonic # non-monotonic ) @pytest.mark.parametrize("key", list(range(0, 8))) @pytest.mark.parametrize("method", [None, "ffill", "bfill", "nearest"]) def test_get_loc_single_unique_numeric(idx, key, method): pi = idx gi = cudf.from_pandas(pi) if ( (key not in pi and method is None) # `method` only applicable to monotonic index or (not pi.is_monotonic_increasing and method is not None) # Get key before the first element is KeyError or (key == 0 and method in "ffill") # Get key after the last element is KeyError or (key == 7 and method in "bfill") ): assert_exceptions_equal( lfunc=pi.get_loc, rfunc=gi.get_loc, lfunc_args_and_kwargs=([], {"key": key, "method": method}), rfunc_args_and_kwargs=([], {"key": key, "method": method}), ) else: with expect_warning_if(method is not None): expected = pi.get_loc(key, method=method) with expect_warning_if(method is not None): got = gi.get_loc(key, method=method) assert_eq(expected, got) @pytest.mark.parametrize( "idx", [pd.RangeIndex(3, 100, 4)], ) @pytest.mark.parametrize("key", list(range(1, 110, 3))) @pytest.mark.parametrize("method", [None, "ffill"]) def test_get_loc_rangeindex(idx, key, method): pi = idx gi = cudf.from_pandas(pi) if ( (key not in pi and method is None) 
# Get key before the first element is KeyError or (key < pi.start and method in "ffill") # Get key after the last element is KeyError or (key >= pi.stop and method in "bfill") ): assert_exceptions_equal( lfunc=pi.get_loc, rfunc=gi.get_loc, lfunc_args_and_kwargs=([], {"key": key, "method": method}), rfunc_args_and_kwargs=([], {"key": key, "method": method}), ) else: with expect_warning_if(method is not None): expected = pi.get_loc(key, method=method) with expect_warning_if(method is not None): got = gi.get_loc(key, method=method) assert_eq(expected, got) @pytest.mark.parametrize( "idx", [ pd.Index([1, 3, 3, 6]), # monotonic pd.Index([6, 1, 3, 3]), # non-monotonic ], ) @pytest.mark.parametrize("key", [0, 3, 6, 7]) @pytest.mark.parametrize("method", [None]) def test_get_loc_single_duplicate_numeric(idx, key, method): pi = idx gi = cudf.from_pandas(pi) if key not in pi: assert_exceptions_equal( lfunc=pi.get_loc, rfunc=gi.get_loc, lfunc_args_and_kwargs=([], {"key": key, "method": method}), rfunc_args_and_kwargs=([], {"key": key, "method": method}), ) else: with expect_warning_if(method is not None): expected = pi.get_loc(key, method=method) with expect_warning_if(method is not None): got = gi.get_loc(key, method=method) assert_eq(expected, got) @pytest.mark.parametrize( "idx", [pd.Index(["b", "f", "m", "q"]), pd.Index(["m", "f", "b", "q"])] ) @pytest.mark.parametrize("key", ["a", "f", "n", "z"]) @pytest.mark.parametrize("method", [None, "ffill", "bfill"]) def test_get_loc_single_unique_string(idx, key, method): pi = idx gi = cudf.from_pandas(pi) if ( (key not in pi and method is None) # `method` only applicable to monotonic index or (not pi.is_monotonic_increasing and method is not None) # Get key before the first element is KeyError or (key == "a" and method == "ffill") # Get key after the last element is KeyError or (key == "z" and method == "bfill") ): assert_exceptions_equal( lfunc=pi.get_loc, rfunc=gi.get_loc, lfunc_args_and_kwargs=([], {"key": key, "method": method}), rfunc_args_and_kwargs=([], {"key": key, "method": method}), ) else: with expect_warning_if(method is not None): expected = pi.get_loc(key, method=method) with expect_warning_if(method is not None): got = gi.get_loc(key, method=method) assert_eq(expected, got) @pytest.mark.parametrize( "idx", [pd.Index(["b", "m", "m", "q"]), pd.Index(["m", "f", "m", "q"])] ) @pytest.mark.parametrize("key", ["a", "f", "n", "z"]) @pytest.mark.parametrize("method", [None]) def test_get_loc_single_duplicate_string(idx, key, method): pi = idx gi = cudf.from_pandas(pi) if key not in pi: assert_exceptions_equal( lfunc=pi.get_loc, rfunc=gi.get_loc, lfunc_args_and_kwargs=([], {"key": key, "method": method}), rfunc_args_and_kwargs=([], {"key": key, "method": method}), ) else: with expect_warning_if(method is not None): expected = pi.get_loc(key, method=method) with expect_warning_if(method is not None): got = gi.get_loc(key, method=method) assert_eq(expected, got) @pytest.mark.parametrize( "idx", [ pd.MultiIndex.from_tuples( [(1, 1, 1), (1, 1, 2), (1, 2, 1), (1, 2, 3), (2, 1, 1), (2, 2, 1)] ), pd.MultiIndex.from_tuples( [(2, 1, 1), (1, 2, 3), (1, 2, 1), (1, 1, 2), (2, 2, 1), (1, 1, 1)] ), pd.MultiIndex.from_tuples( [(1, 1, 1), (1, 1, 2), (1, 1, 2), (1, 2, 3), (2, 1, 1), (2, 2, 1)] ), ], ) @pytest.mark.parametrize("key", [1, (1, 2), (1, 2, 3), (2, 1, 1), (9, 9, 9)]) @pytest.mark.parametrize("method", [None]) def test_get_loc_multi_numeric(idx, key, method): pi = idx.sort_values() gi = cudf.from_pandas(pi) if key not in pi: assert_exceptions_equal( 
lfunc=pi.get_loc, rfunc=gi.get_loc, lfunc_args_and_kwargs=([], {"key": key, "method": method}), rfunc_args_and_kwargs=([], {"key": key, "method": method}), ) else: with expect_warning_if(method is not None): expected = pi.get_loc(key, method=method) with expect_warning_if(method is not None): got = gi.get_loc(key, method=method) assert_eq(expected, got) @pytest.mark.parametrize( "idx", [ pd.MultiIndex.from_tuples( [(2, 1, 1), (1, 2, 3), (1, 2, 1), (1, 1, 1), (1, 1, 1), (2, 2, 1)] ) ], ) @pytest.mark.parametrize( "key, result", [ (1, slice(1, 5, 1)), # deviates ((1, 2), slice(1, 3, 1)), ((1, 2, 3), slice(1, 2, None)), ((2, 1, 1), slice(0, 1, None)), ((9, 9, 9), None), ], ) @pytest.mark.parametrize("method", [None]) def test_get_loc_multi_numeric_deviate(idx, key, result, method): pi = idx gi = cudf.from_pandas(pi) with expect_warning_if( isinstance(key, tuple), pd.errors.PerformanceWarning ): key_flag = key not in pi if key_flag: with expect_warning_if( isinstance(key, tuple), pd.errors.PerformanceWarning ): assert_exceptions_equal( lfunc=pi.get_loc, rfunc=gi.get_loc, lfunc_args_and_kwargs=([], {"key": key, "method": method}), rfunc_args_and_kwargs=([], {"key": key, "method": method}), ) else: expected = result with expect_warning_if(method is not None): got = gi.get_loc(key, method=method) assert_eq(expected, got) @pytest.mark.parametrize( "idx", [ pd.MultiIndex.from_tuples( [ ("a", "a", "a"), ("a", "a", "b"), ("a", "b", "a"), ("a", "b", "c"), ("b", "a", "a"), ("b", "c", "a"), ] ), pd.MultiIndex.from_tuples( [ ("a", "a", "b"), ("a", "b", "c"), ("b", "a", "a"), ("a", "a", "a"), ("a", "b", "a"), ("b", "c", "a"), ] ), pd.MultiIndex.from_tuples( [ ("a", "a", "a"), ("a", "b", "c"), ("b", "a", "a"), ("a", "a", "b"), ("a", "b", "a"), ("b", "c", "a"), ] ), pd.MultiIndex.from_tuples( [ ("a", "a", "a"), ("a", "a", "b"), ("a", "a", "b"), ("a", "b", "c"), ("b", "a", "a"), ("b", "c", "a"), ] ), pd.MultiIndex.from_tuples( [ ("a", "a", "b"), ("b", "a", "a"), ("b", "a", "a"), ("a", "a", "a"), ("a", "b", "a"), ("b", "c", "a"), ] ), ], ) @pytest.mark.parametrize( "key", ["a", ("a", "a"), ("a", "b", "c"), ("b", "c", "a"), ("z", "z", "z")] ) @pytest.mark.parametrize("method", [None]) def test_get_loc_multi_string(idx, key, method): pi = idx.sort_values() gi = cudf.from_pandas(pi) if key not in pi: assert_exceptions_equal( lfunc=pi.get_loc, rfunc=gi.get_loc, lfunc_args_and_kwargs=([], {"key": key, "method": method}), rfunc_args_and_kwargs=([], {"key": key, "method": method}), ) else: with expect_warning_if(method is not None): expected = pi.get_loc(key, method=method) with expect_warning_if(method is not None): got = gi.get_loc(key, method=method) assert_eq(expected, got) @pytest.mark.parametrize( "objs", [ [pd.RangeIndex(0, 10), pd.RangeIndex(10, 20)], [pd.RangeIndex(10, 20), pd.RangeIndex(22, 40), pd.RangeIndex(50, 60)], [pd.RangeIndex(10, 20, 2), pd.RangeIndex(20, 40, 2)], ], ) def test_range_index_concat(objs): cudf_objs = [cudf.from_pandas(obj) for obj in objs] actual = cudf.concat(cudf_objs) expected = objs[0] for obj in objs[1:]: expected = expected.append(obj) assert_eq(expected, actual) @pytest.mark.parametrize( "idx1, idx2", [ (pd.RangeIndex(0, 10), pd.RangeIndex(3, 7)), (pd.RangeIndex(0, 10), pd.RangeIndex(10, 20)), (pd.RangeIndex(0, 10, 2), pd.RangeIndex(1, 5, 3)), (pd.RangeIndex(1, 5, 3), pd.RangeIndex(0, 10, 2)), (pd.RangeIndex(1, 10, 3), pd.RangeIndex(1, 5, 2)), (pd.RangeIndex(1, 5, 2), pd.RangeIndex(1, 10, 3)), (pd.RangeIndex(1, 100, 3), pd.RangeIndex(1, 50, 3)), (pd.RangeIndex(1, 100, 3), 
pd.RangeIndex(1, 50, 6)),
        (pd.RangeIndex(1, 100, 6), pd.RangeIndex(1, 50, 3)),
        (pd.RangeIndex(0, 10, name="a"), pd.RangeIndex(90, 100, name="b")),
        (pd.Index([0, 1, 2, 30], name="a"), pd.Index([90, 100])),
        (pd.Index([0, 1, 2, 30], name="a"), [90, 100]),
        (pd.Index([0, 1, 2, 30]), pd.Index([0, 10, 1.0, 11])),
        (pd.Index(["a", "b", "c", "d", "c"]), pd.Index(["a", "c", "z"])),
        (
            pd.IntervalIndex.from_tuples([(0, 2), (0, 2), (2, 4)]),
            pd.IntervalIndex.from_tuples([(0, 2), (2, 4)]),
        ),
        (pd.RangeIndex(0, 10), pd.Index([8, 1, 2, 4])),
        (pd.Index([8, 1, 2, 4], name="a"), pd.Index([8, 1, 2, 4], name="b")),
        (
            pd.Index([8, 1, 2, 4], name="a"),
            pd.Index([], name="b", dtype="int64"),
        ),
        (pd.Index([], dtype="int64", name="a"), pd.Index([10, 12], name="b")),
        (pd.Index([True, True, True], name="a"), pd.Index([], dtype="bool")),
        (
            pd.Index([True, True, True]),
            pd.Index([False, True], dtype="bool", name="b"),
        ),
    ],
)
@pytest.mark.parametrize("sort", [None, False])
def test_union_index(idx1, idx2, sort):
    expected = idx1.union(idx2, sort=sort)
    idx1 = cudf.from_pandas(idx1) if isinstance(idx1, pd.Index) else idx1
    idx2 = cudf.from_pandas(idx2) if isinstance(idx2, pd.Index) else idx2
    actual = idx1.union(idx2, sort=sort)

    assert_eq(expected, actual)


def test_union_bool_with_other():
    idx1 = cudf.Index([True, True, True])
    idx2 = cudf.Index([0, 1], name="b")
    with cudf.option_context("mode.pandas_compatible", True):
        with pytest.raises(cudf.errors.MixedTypeError):
            idx1.union(idx2)


@pytest.mark.parametrize("dtype1", ["int8", "int32", "int64"])
@pytest.mark.parametrize("dtype2", ["uint32", "uint64"])
def test_union_unsigned_vs_signed(dtype1, dtype2):
    idx1 = cudf.Index([10, 20, 30], dtype=dtype1)
    idx2 = cudf.Index([0, 1], dtype=dtype2)
    with cudf.option_context("mode.pandas_compatible", True):
        with pytest.raises(cudf.errors.MixedTypeError):
            idx1.union(idx2)


@pytest.mark.parametrize(
    "idx1, idx2",
    [
        (pd.RangeIndex(0, 10), pd.RangeIndex(3, 7)),
        (pd.RangeIndex(0, 10), pd.RangeIndex(-10, 20)),
        (pd.RangeIndex(0, 10, name="a"), pd.RangeIndex(90, 100, name="b")),
        (pd.Index([0, 1, 2, 30], name=pd.NA), pd.Index([30, 0, 90, 100])),
        (pd.Index([0, 1, 2, 30], name="a"), [90, 100]),
        (pd.Index([0, 1, 2, 30]), pd.Index([0, 10, 1.0, 11])),
        (
            pd.Index(["a", "b", "c", "d", "c"]),
            pd.Index(["a", "c", "z"], name="abc"),
        ),
        (
            pd.Index(["a", "b", "c", "d", "c"]),
            pd.Index(["a", "b", "c", "d", "c"]),
        ),
        (
            pd.Index([True, False, True, True]),
            pd.Index([10, 11, 12, 0, 1, 2]),
        ),
        (pd.Index([True, False, True, True]), pd.Index([True, True])),
        (pd.RangeIndex(0, 10, name="a"), pd.Index([5, 6, 7], name="b")),
        (pd.Index(["a", "b", "c"], dtype="category"), pd.Index(["a", "b"])),
        (pd.Index(["a", "b", "c"], dtype="category"), pd.Index([1, 2, 3])),
        (pd.Index([0, 1, 2], dtype="category"), pd.RangeIndex(0, 10)),
        (pd.Index(["a", "b", "c"], name="abc"), []),
        (pd.Index([], name="abc"), pd.RangeIndex(0, 4)),
        (pd.Index([1, 2, 3]), pd.Index([1, 2], dtype="category")),
        (pd.Index([]), pd.Index([1, 2], dtype="category")),
    ],
)
@pytest.mark.parametrize("sort", [None, False])
@pytest.mark.parametrize("pandas_compatible", [True, False])
def test_intersection_index(idx1, idx2, sort, pandas_compatible):
    expected = idx1.intersection(idx2, sort=sort)

    with cudf.option_context("mode.pandas_compatible", pandas_compatible):
        idx1 = cudf.from_pandas(idx1) if isinstance(idx1, pd.Index) else idx1
        idx2 = cudf.from_pandas(idx2) if isinstance(idx2, pd.Index) else idx2

        actual = idx1.intersection(idx2, sort=sort)

        # TODO: Resolve the bool vs ints mixed issue
        # once pandas has a direction on this
        # issue: https://github.com/pandas-dev/pandas/issues/44000
        # `exact=False` only when exactly one of the two sides is boolean,
        # since pandas and cudf can then disagree on the result dtype.
        assert_eq(
            expected,
            actual,
            exact=False
            if (is_bool_dtype(idx1.dtype) and not is_bool_dtype(idx2.dtype))
            or (not is_bool_dtype(idx1.dtype) and is_bool_dtype(idx2.dtype))
            else True,
        )


@pytest.mark.parametrize(
    "data",
    [
        [1, 2, 3],
        ["a", "v", "d"],
        [234.243, 2432.3, None],
        [True, False, True],
        pd.Series(["a", " ", "v"], dtype="category"),
        pd.IntervalIndex.from_breaks([0, 1, 2, 3]),
    ],
)
@pytest.mark.parametrize(
    "func",
    [
        "is_numeric",
        "is_boolean",
        "is_integer",
        "is_floating",
        "is_object",
        "is_categorical",
        "is_interval",
    ],
)
def test_index_type_methods(data, func):
    pidx = pd.Index(data)
    gidx = cudf.from_pandas(pidx)

    if PANDAS_GE_200:
        with pytest.warns(FutureWarning):
            expected = getattr(pidx, func)()
    else:
        expected = getattr(pidx, func)()
    with pytest.warns(FutureWarning):
        actual = getattr(gidx, func)()

    if gidx.dtype == np.dtype("bool") and func == "is_object":
        assert_eq(False, actual)
    else:
        assert_eq(expected, actual)


@pytest.mark.parametrize(
    "resolution", ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"]
)
def test_index_datetime_ceil(resolution):
    cuidx = cudf.DatetimeIndex([1000000, 2000000, 3000000, 4000000, 5000000])
    pidx = cuidx.to_pandas()

    pidx_ceil = pidx.ceil(resolution)
    cuidx_ceil = cuidx.ceil(resolution)

    assert_eq(pidx_ceil, cuidx_ceil)


@pytest.mark.parametrize(
    "resolution", ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"]
)
def test_index_datetime_floor(resolution):
    cuidx = cudf.DatetimeIndex([1000000, 2000000, 3000000, 4000000, 5000000])
    pidx = cuidx.to_pandas()

    pidx_floor = pidx.floor(resolution)
    cuidx_floor = cuidx.floor(resolution)

    assert_eq(pidx_floor, cuidx_floor)


@pytest.mark.parametrize(
    "resolution", ["D", "H", "T", "min", "S", "L", "ms", "U", "us", "N"]
)
def test_index_datetime_round(resolution):
    cuidx = cudf.DatetimeIndex([1000000, 2000000, 3000000, 4000000, 5000000])
    pidx = cuidx.to_pandas()

    pidx_round = pidx.round(resolution)
    cuidx_round = cuidx.round(resolution)

    assert_eq(pidx_round, cuidx_round)


@pytest.mark.parametrize(
    "data,nan_idx,NA_idx",
    [([1, 2, 3, None], None, 3), ([2, 3, np.nan, None], 2, 3)],
)
@pytest.mark.parametrize("nan_as_null", [True, False])
def test_index_nan_as_null(data, nan_idx, NA_idx, nan_as_null):
    idx = cudf.Index(data, nan_as_null=nan_as_null)

    if nan_as_null:
        if nan_idx is not None:
            assert idx[nan_idx] is cudf.NA
    else:
        if nan_idx is not None:
            assert np.isnan(idx[nan_idx])
    if NA_idx is not None:
        assert idx[NA_idx] is cudf.NA


@pytest.mark.parametrize(
    "data",
    [
        [],
        pd.Series(
            ["this", "is", None, "a", "test"], index=["a", "b", "c", "d", "e"]
        ),
        pd.Series([0, 15, 10], index=[0, None, 9]),
        pd.Series(
            range(25),
            index=pd.date_range(
                start="2019-01-01", end="2019-01-02", freq="H"
            ),
        ),
    ],
)
@pytest.mark.parametrize(
    "values",
    [
        [],
        ["this", "is"],
        [0, 19, 13],
        ["2019-01-01 04:00:00", "2019-01-01 06:00:00", "2018-03-02 10:00:00"],
    ],
)
def test_isin_index(data, values):
    psr = _create_pandas_series_float64_default(data)
    gsr = cudf.Series.from_pandas(psr)

    got = gsr.index.isin(values)
    expected = psr.index.isin(values)

    assert_eq(got, expected)


@pytest.mark.parametrize(
    "data",
    [
        pd.MultiIndex.from_arrays(
            [[1, 2, 3], ["red", "blue", "green"]], names=("number", "color")
        ),
        pd.MultiIndex.from_arrays([[], []], names=("number", "color")),
        pd.MultiIndex.from_arrays(
            [[1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"]],
            names=("number", "color"),
        ),
        pd.MultiIndex.from_product(
            [[0, 1], ["red", "blue", "green"]], names=("number", "color")
        ),
    ],
)
@pytest.mark.parametrize(
"values,level,err", [ ([(1, "red"), (2, "blue"), (0, "green")], None, None), (["red", "orange", "yellow"], "color", None), (["red", "white", "yellow"], "color", None), ([0, 1, 2, 10, 11, 15], "number", None), ([0, 1, 2, 10, 11, 15], None, TypeError), (pd.Series([0, 1, 2, 10, 11, 15]), None, TypeError), (pd.Index([0, 1, 2, 10, 11, 15]), None, TypeError), (pd.Index([0, 1, 2, 8, 11, 15]), "number", None), (pd.Index(["red", "white", "yellow"]), "color", None), ([(1, "red"), (3, "red")], None, None), (((1, "red"), (3, "red")), None, None), ( pd.MultiIndex.from_arrays( [[1, 2, 3], ["red", "blue", "green"]], names=("number", "color"), ), None, None, ), ( pd.MultiIndex.from_arrays([[], []], names=("number", "color")), None, None, ), ( pd.MultiIndex.from_arrays( [ [1, 2, 3, 10, 100], ["red", "blue", "green", "pink", "white"], ], names=("number", "color"), ), None, None, ), ], ) def test_isin_multiindex(data, values, level, err): pmdx = data gmdx = cudf.from_pandas(data) if err is None: expected = pmdx.isin(values, level=level) if isinstance(values, pd.MultiIndex): values = cudf.from_pandas(values) got = gmdx.isin(values, level=level) assert_eq(got, expected) else: assert_exceptions_equal( lfunc=pmdx.isin, rfunc=gmdx.isin, lfunc_args_and_kwargs=([values], {"level": level}), rfunc_args_and_kwargs=([values], {"level": level}), check_exception_type=False, ) range_data = [ range(np.random.randint(0, 100)), range(9, 12, 2), range(20, 30), range(100, 1000, 10), range(0, 10, -2), range(0, -10, 2), range(0, -10, -2), ] @pytest.fixture(params=range_data) def rangeindex(request): """Create a cudf RangeIndex of different `nrows`""" return RangeIndex(request.param) @pytest.mark.parametrize( "func", ["nunique", "min", "max", "any", "values"], ) def test_rangeindex_methods(rangeindex, func): gidx = rangeindex pidx = gidx.to_pandas() if func == "values": expected = pidx.values actual = gidx.values else: expected = getattr(pidx, func)() actual = getattr(gidx, func)() assert_eq(expected, actual) def test_index_constructor_integer(default_integer_bitwidth): got = cudf.Index([1, 2, 3]) expect = cudf.Index([1, 2, 3], dtype=f"int{default_integer_bitwidth}") assert_eq(expect, got) def test_index_constructor_float(default_float_bitwidth): got = cudf.Index([1.0, 2.0, 3.0]) expect = cudf.Index( [1.0, 2.0, 3.0], dtype=f"float{default_float_bitwidth}" ) assert_eq(expect, got) def test_rangeindex_union_default_user_option(default_integer_bitwidth): # Test that RangeIndex is materialized into 32 bit index under user # configuration for union operation. idx1 = cudf.RangeIndex(0, 2) idx2 = cudf.RangeIndex(5, 6) expected = cudf.Index([0, 1, 5], dtype=f"int{default_integer_bitwidth}") actual = idx1.union(idx2) assert_eq(expected, actual) def test_rangeindex_intersection_default_user_option(default_integer_bitwidth): # Test that RangeIndex is materialized into 32 bit index under user # configuration for intersection operation. idx1 = cudf.RangeIndex(0, 100) # Intersecting two RangeIndex will _always_ result in a RangeIndex, use # regular index here to force materializing. idx2 = cudf.Index([50, 102]) expected = cudf.Index([50], dtype=f"int{default_integer_bitwidth}") actual = idx1.intersection(idx2) assert_eq(expected, actual) def test_rangeindex_take_default_user_option(default_integer_bitwidth): # Test that RangeIndex is materialized into 32 bit index under user # configuration for take operation. 
idx = cudf.RangeIndex(0, 100) actual = idx.take([0, 3, 7, 62]) expected = cudf.Index( [0, 3, 7, 62], dtype=f"int{default_integer_bitwidth}" ) assert_eq(expected, actual) def test_rangeindex_apply_boolean_mask_user_option(default_integer_bitwidth): # Test that RangeIndex is materialized into 32 bit index under user # configuration for apply boolean mask operation. idx = cudf.RangeIndex(0, 8) mask = [True, True, True, False, False, False, True, False] actual = idx[mask] expected = cudf.Index([0, 1, 2, 6], dtype=f"int{default_integer_bitwidth}") assert_eq(expected, actual) def test_rangeindex_repeat_user_option(default_integer_bitwidth): # Test that RangeIndex is materialized into 32 bit index under user # configuration for repeat operation. idx = cudf.RangeIndex(0, 3) actual = idx.repeat(3) expected = cudf.Index( [0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=f"int{default_integer_bitwidth}" ) assert_eq(expected, actual) @pytest.mark.parametrize( "op, expected, expected_kind", [ (lambda idx: 2**idx, [2, 4, 8, 16], "int"), (lambda idx: idx**2, [1, 4, 9, 16], "int"), (lambda idx: idx / 2, [0.5, 1, 1.5, 2], "float"), (lambda idx: 2 / idx, [2, 1, 2 / 3, 0.5], "float"), (lambda idx: idx % 3, [1, 2, 0, 1], "int"), (lambda idx: 3 % idx, [0, 1, 0, 3], "int"), ], ) def test_rangeindex_binops_user_option( op, expected, expected_kind, default_integer_bitwidth ): # Test that RangeIndex is materialized into 32 bit index under user # configuration for binary operation. idx = cudf.RangeIndex(1, 5) actual = op(idx) expected = cudf.Index( expected, dtype=f"{expected_kind}{default_integer_bitwidth}" ) assert_eq( expected, actual, ) @pytest.mark.parametrize( "op", [operator.add, operator.sub, operator.mul, operator.truediv] ) def test_rangeindex_binop_diff_names_none(op): idx1 = cudf.RangeIndex(10, 13, name="foo") idx2 = cudf.RangeIndex(13, 16, name="bar") result = op(idx1, idx2) expected = op(idx1.to_pandas(), idx2.to_pandas()) assert_eq(result, expected) assert result.name is None def test_rangeindex_join_user_option(default_integer_bitwidth): # Test that RangeIndex is materialized into 32 bit index under user # configuration for join. idx1 = cudf.RangeIndex(0, 10, name="a") idx2 = cudf.RangeIndex(5, 15, name="b") actual = idx1.join(idx2, how="inner", sort=True) expected = idx1.to_pandas().join(idx2.to_pandas(), how="inner", sort=True) assert actual.dtype == cudf.dtype(f"int{default_integer_bitwidth}") assert_eq(expected, actual) def test_rangeindex_where_user_option(default_integer_bitwidth): # Test that RangeIndex is materialized into 32 bit index under user # configuration for where operation. 
idx = cudf.RangeIndex(0, 10) mask = [True, False, True, False, True, False, True, False, True, False] actual = idx.where(mask, -1) expected = cudf.Index( [0, -1, 2, -1, 4, -1, 6, -1, 8, -1], dtype=f"int{default_integer_bitwidth}", ) assert_eq(expected, actual) def test_rangeindex_append_return_rangeindex(): idx = cudf.RangeIndex(0, 10) result = idx.append([]) assert_eq(idx, result) result = idx.append(cudf.Index([10])) expected = cudf.RangeIndex(0, 11) assert_eq(result, expected) index_data = [ range(np.random.randint(0, 100)), range(0, 10, -2), range(0, -10, 2), range(0, -10, -2), range(0, 1), [1, 2, 3, 1, None, None], [None, None, 3.2, 1, None, None], [None, "a", "3.2", "z", None, None], pd.Series(["a", "b", None], dtype="category"), np.array([1, 2, 3, None], dtype="datetime64[s]"), ] @pytest.fixture(params=index_data) def index(request): """Create a cudf Index of different dtypes""" return cudf.Index(request.param) @pytest.mark.parametrize( "func", [ "to_series", "isna", "notna", "append", ], ) def test_index_methods(index, func): gidx = index pidx = gidx.to_pandas() if func == "append": expected = pidx.append(other=pidx) actual = gidx.append(other=gidx) else: expected = getattr(pidx, func)() actual = getattr(gidx, func)() assert_eq(expected, actual) @pytest.mark.parametrize( "idx, values", [ (range(100, 1000, 10), [200, 600, 800]), ([None, "a", "3.2", "z", None, None], ["a", "z"]), (pd.Series(["a", "b", None], dtype="category"), [10, None]), ], ) def test_index_isin_values(idx, values): gidx = cudf.Index(idx) pidx = gidx.to_pandas() actual = gidx.isin(values) expected = pidx.isin(values) assert_eq(expected, actual) @pytest.mark.parametrize( "idx, scalar", [ (range(0, -10, -2), -4), ([None, "a", "3.2", "z", None, None], "x"), (pd.Series(["a", "b", None], dtype="category"), 10), ], ) def test_index_isin_scalar_values(idx, scalar): gidx = cudf.Index(idx) with pytest.raises( TypeError, match=re.escape( f"only list-like objects are allowed to be passed " f"to isin(), you passed a {type(scalar).__name__}" ), ): gidx.isin(scalar) def test_index_any(): gidx = cudf.Index([1, 2, 3]) pidx = gidx.to_pandas() assert_eq(pidx.any(), gidx.any()) def test_index_values(): gidx = cudf.Index([1, 2, 3]) pidx = gidx.to_pandas() assert_eq(pidx.values, gidx.values) def test_index_null_values(): gidx = cudf.Index([1.0, None, 3, 0, None]) with pytest.raises(ValueError): gidx.values def test_index_error_list_index(): s = cudf.Series([[1, 2], [2], [4]]) with pytest.raises( NotImplementedError, match=re.escape( "Unsupported column type passed to create an " "Index: <class 'cudf.core.column.lists.ListColumn'>" ), ): cudf.Index(s) @pytest.mark.parametrize( "data", [ [1, 2, 3], pytest.param( [np.nan, 10, 15, 16], marks=pytest.mark.xfail( reason="https://github.com/pandas-dev/pandas/issues/49818" ), ), range(0, 10), [np.nan, None, 10, 20], ["ab", "zx", "pq"], ["ab", "zx", None, "pq"], ], ) def test_index_hasnans(data): gs = cudf.Index(data, nan_as_null=False) ps = gs.to_pandas(nullable=True) # Check type to avoid mixing Python bool and NumPy bool assert isinstance(gs.hasnans, bool) assert gs.hasnans == ps.hasnans @pytest.mark.parametrize( "data", [ [1, 2, 3, 1, 1, 3, 2, 3], [np.nan, 10, 15, 16, np.nan, 10, 16], range(0, 10), ["ab", "zx", None, "pq", "ab", None, "zx", None], ], ) @pytest.mark.parametrize("keep", ["first", "last", False]) def test_index_duplicated(data, keep): gs = cudf.Index(data) ps = gs.to_pandas() expected = ps.duplicated(keep=keep) actual = gs.duplicated(keep=keep) assert_eq(expected, actual) 
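

# Illustrative sketch, not part of the test suite: `_demo_index_duplicated_keep`
# is a hypothetical helper (not collected by pytest). It spells out the pandas
# semantics of `Index.duplicated` checked above -- `keep="first"` flags later
# repeats, `keep="last"` flags earlier ones, and `keep=False` flags every
# member of a duplicate group.
def _demo_index_duplicated_keep():
    idx = cudf.Index([1, 2, 1, 3, 2])
    flags_first = idx.duplicated(keep="first")  # False, False, True, False, True
    flags_all = idx.duplicated(keep=False)  # True, True, True, False, True
    return flags_first, flags_all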
@pytest.mark.parametrize( "data,expected_dtype", [ ([10, 11, 12], pd.Int64Dtype()), ([0.1, 10.2, 12.3], pd.Float64Dtype()), (["abc", None, "def"], pd.StringDtype()), ], ) def test_index_to_pandas_nullable(data, expected_dtype): gi = cudf.Index(data) pi = gi.to_pandas(nullable=True) expected = pd.Index(data, dtype=expected_dtype) assert_eq(pi, expected) class TestIndexScalarGetItem: @pytest.fixture( params=[range(1, 10, 2), [1, 2, 3], ["a", "b", "c"], [1.5, 2.5, 3.5]] ) def index_values(self, request): return request.param @pytest.fixture(params=[int, np.int8, np.int32, np.int64]) def i(self, request): return request.param(1) def test_scalar_getitem(self, index_values, i): index = cudf.Index(index_values) assert not isinstance(index[i], cudf.Index) assert index[i] == index_values[i] assert_eq(index, index.to_pandas()) @pytest.mark.parametrize( "data", [ [ pd.Timestamp("1970-01-01 00:00:00.000000001"), pd.Timestamp("1970-01-01 00:00:00.000000002"), 12, 20, ], [ pd.Timedelta(10), pd.Timedelta(20), 12, 20, ], [1, 2, 3, 4], ], ) def test_index_mixed_dtype_error(data): pi = pd.Index(data, dtype="object") with pytest.raises(TypeError): cudf.Index(pi) @pytest.mark.parametrize("cls", [pd.DatetimeIndex, pd.TimedeltaIndex]) def test_index_date_duration_freq_error(cls): s = cls([1, 2, 3], freq="infer") with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.Index(s) @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) def test_index_getitem_time_duration(dtype): gidx = cudf.Index([1, 2, 3, 4, None], dtype=dtype) pidx = gidx.to_pandas() with cudf.option_context("mode.pandas_compatible", True): for i in range(len(gidx)): if i == 4: assert gidx[i] is pidx[i] else: assert_eq(gidx[i], pidx[i]) @pytest.mark.parametrize("dtype", ALL_TYPES) def test_index_empty_from_pandas(request, dtype): request.node.add_marker( pytest.mark.xfail( condition=not PANDAS_GE_200 and dtype in { "datetime64[ms]", "datetime64[s]", "datetime64[us]", "timedelta64[ms]", "timedelta64[s]", "timedelta64[us]", }, reason="Fixed in pandas-2.0", ) ) pidx = pd.Index([], dtype=dtype) gidx = cudf.from_pandas(pidx) assert_eq(pidx, gidx) def test_empty_index_init(): pidx = pd.Index([]) gidx = cudf.Index([]) assert_eq(pidx, gidx) @pytest.mark.parametrize( "data", [[1, 2, 3], ["ab", "cd", "e", None], range(0, 10)] ) @pytest.mark.parametrize("data_name", [None, 1, "abc"]) @pytest.mark.parametrize("index", [True, False]) @pytest.mark.parametrize("name", [None, no_default, 1, "abc"]) def test_index_to_frame(data, data_name, index, name): pidx = pd.Index(data, name=data_name) gidx = cudf.from_pandas(pidx) with expect_warning_if(name is None): expected = pidx.to_frame(index=index, name=name) with expect_warning_if(name is None): actual = gidx.to_frame(index=index, name=name) assert_eq(expected, actual) @pytest.mark.parametrize("data", [[1, 2, 3], range(0, 10)]) @pytest.mark.parametrize("dtype", ["str", "int64", "float64"]) def test_index_with_index_dtype(data, dtype): pidx = pd.Index(data) gidx = cudf.Index(data) expected = pd.Index(pidx, dtype=dtype) actual = cudf.Index(gidx, dtype=dtype) assert_eq(expected, actual) def test_period_index_error(): pidx = pd.PeriodIndex(year=[2000, 2002], quarter=[1, 3]) with pytest.raises(NotImplementedError): cudf.from_pandas(pidx) with pytest.raises(NotImplementedError): cudf.Index(pidx) with pytest.raises(NotImplementedError): cudf.Series(pidx) with pytest.raises(NotImplementedError): cudf.Series(pd.Series(pidx)) with 
pytest.raises(NotImplementedError): cudf.Series(pd.array(pidx)) def test_index_from_dataframe_valueerror(): with pytest.raises(ValueError): cudf.Index(cudf.DataFrame(range(1))) def test_index_from_scalar_valueerror(): with pytest.raises(ValueError): cudf.Index(11) @pytest.mark.parametrize("idx", [0, np.int64(0)]) def test_index_getitem_from_int(idx): result = cudf.Index([1, 2])[idx] assert result == 1 @pytest.mark.parametrize("idx", [1.5, True, "foo"]) def test_index_getitem_from_nonint_raises(idx): with pytest.raises(ValueError): cudf.Index([1, 2])[idx]
0
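# --- Illustrative sketch (not part of the test file above) ---
# The RangeIndex tests above rely on cudf materializing a lazy RangeIndex
# into a concrete integer Index whose width follows the
# "default_integer_bitwidth" option (the `default_integer_bitwidth` fixture
# is assumed to set that option). A minimal, hedged demonstration:
import cudf

with cudf.option_context("default_integer_bitwidth", 32):
    idx = cudf.RangeIndex(0, 3)
    out = idx.repeat(2)  # repeat() cannot stay lazy, so it materializes
    print(out.dtype)     # expected: int32 under the 32-bit setting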
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_seriesmap.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. from math import floor import numpy as np import pandas as pd import pytest import cudf from cudf import Series from cudf.testing._utils import assert_eq, assert_exceptions_equal def test_series_map_basic(): gd1 = cudf.Series(["cat", np.nan, "rabbit", "dog"]) pdf1 = gd1.to_pandas() expected_dict = pdf1.map({"cat": "kitten", "dog": "puppy"}) actual_dict = gd1.map({"cat": "kitten", "dog": "puppy"}) assert_eq(expected_dict, actual_dict) @pytest.mark.parametrize("name", ["a", None, 2]) def test_series_map_series_input(name): gd1 = cudf.Series(["cat", "dog", np.nan, "rabbit"], name=name) pdf1 = gd1.to_pandas() expected_series = pdf1.map(pd.Series({"cat": "kitten", "dog": "puppy"})) actual_series = gd1.map(cudf.Series({"cat": "kitten", "dog": "puppy"})) assert_eq(expected_series, actual_series) def test_series_map_callable_numeric_basic(): gd2 = cudf.Series([1, 2, 3, 4, np.nan]) pdf2 = gd2.to_pandas() expected_function = pdf2.map(lambda x: x**2) actual_function = gd2.map(lambda x: x**2) assert_eq(expected_function, actual_function) @pytest.mark.parametrize("nelem", [2, 10, 100, 1000]) def test_series_map_callable_numeric_random(nelem): # Generate random data np.random.seed(0) data = np.random.random(nelem) * 100 sr = Series(data) pdsr = pd.Series(data) # Map a rounding function over the data got = sr.map(lambda x: (floor(x) + 1 if x - floor(x) >= 0.5 else floor(x))) expect = pdsr.map( lambda x: (floor(x) + 1 if x - floor(x) >= 0.5 else floor(x)) ) # Check assert_eq(expect, got, check_dtype=False) def test_series_map_callable_numeric_random_dtype_change(): # Test for changing the out_dtype using map data = list(range(10)) sr = Series(data) pdsr = pd.Series(data) got = sr.map(lambda x: float(x)) expect = pdsr.map(lambda x: float(x)) # Check assert_eq(expect, got) def test_series_map_non_unique_index(): # Check that a non-unique map index raises the same error as pandas gd1 = cudf.Series([1, 2, 3, 4, np.nan]) pd1 = pd.Series([1, 2, 3, 4, np.nan]) gd_map_series = cudf.Series(["a", "b", "c"], index=[1, 1, 2]) pd_map_series = pd.Series(["a", "b", "c"], index=[1, 1, 2]) assert_exceptions_equal( lfunc=pd1.map, rfunc=gd1.map, check_exception_type=False, lfunc_args_and_kwargs=([pd_map_series],), rfunc_args_and_kwargs=([gd_map_series],), )
0
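# --- Illustrative sketch (not part of the test file above) ---
# Series.map with a dict (or an index-keyed Series) looks each value up and
# yields nulls for unmatched keys, mirroring pandas. A minimal example using
# only the cudf API exercised above:
import cudf

s = cudf.Series(["cat", "dog", "rabbit"])
print(s.map({"cat": "kitten", "dog": "puppy"}))  # "rabbit" maps to <NA>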
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_gcs.py
# Copyright (c) 2020-2022, NVIDIA CORPORATION. import io import os import numpy as np import pandas as pd import pytest import cudf from cudf.testing._utils import assert_eq gcsfs = pytest.importorskip("gcsfs") TEST_PROJECT = "cudf-gcs-test-project" TEST_BUCKET = "cudf-gcs-test-bucket" @pytest.fixture(scope="module") def pdf(): df = pd.DataFrame() df["Integer"] = np.array([2345, 11987, 9027, 9027]) df["Float"] = np.array([9.001, 8.343, 6, 2.781]) df["Integer2"] = np.array([2345, 106, 2088, 789277]) df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"]) df["Boolean"] = np.array([True, False, True, False]) return df def test_read_csv(pdf, monkeypatch, tmpdir): # Write to buffer fpath = TEST_BUCKET + "test_csv_reader.csv" buffer = pdf.to_csv(index=False) def mock_open(*args, **kwargs): return io.BytesIO(buffer.encode()) def mock_size(*args): return len(buffer.encode()) monkeypatch.setattr(gcsfs.core.GCSFileSystem, "open", mock_open) monkeypatch.setattr(gcsfs.core.GCSFileSystem, "size", mock_size) # Test read from explicit path. # Since we are monkey-patching, we cannot use # use_python_file_object=True, because the pyarrow # `open_input_file` command will fail (since it doesn't # use the monkey-patched `open` definition) got = cudf.read_csv(f"gcs://{fpath}", use_python_file_object=False) assert_eq(pdf, got) # AbstractBufferedFile -> PythonFile conversion # will work fine with the monkey-patched FS if we # pass in an fsspec file object fs = gcsfs.core.GCSFileSystem() with fs.open(f"gcs://{fpath}") as f: got = cudf.read_csv(f) assert_eq(pdf, got) def test_write_orc(pdf, monkeypatch, tmpdir): gcs_fname = TEST_BUCKET + "test_orc_writer.orc" local_filepath = os.path.join(tmpdir, "test_orc.orc") gdf = cudf.from_pandas(pdf) def mock_open(*args, **kwargs): return open(local_filepath, "wb") monkeypatch.setattr(gcsfs.core.GCSFileSystem, "open", mock_open) gdf.to_orc(f"gcs://{gcs_fname}") got = pd.read_orc(local_filepath) assert_eq(pdf, got)
0
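# --- Illustrative sketch (not part of the test file above) ---
# The GCS tests avoid real network access by monkeypatching gcsfs's open()
# and size() to serve an in-memory buffer. The same idea in isolation;
# MockFS is a hypothetical stand-in, not a cudf or gcsfs API:
import io

class MockFS:
    """Fake filesystem that serves one in-memory payload for any path."""

    def __init__(self, payload: bytes):
        self._payload = payload

    def open(self, *args, **kwargs):
        return io.BytesIO(self._payload)

    def size(self, *args):
        return len(self._payload)

fs = MockFS(b"a,b\n1,2\n")
with fs.open("gcs://bucket/file.csv") as f:
    print(f.read())  # b"a,b\n1,2\n" regardless of the path requested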
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_scan.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. from itertools import product import numpy as np import pandas as pd import pytest import cudf from cudf.core.dtypes import Decimal32Dtype, Decimal64Dtype, Decimal128Dtype from cudf.testing._utils import ( INTEGER_TYPES, NUMERIC_TYPES, assert_eq, gen_rand, ) params_sizes = [0, 1, 2, 5] def _gen_params(): for t, n in product(NUMERIC_TYPES, params_sizes): if (t == np.int8 or t == np.int16) and n > 20: # to keep data in range continue yield t, n @pytest.mark.parametrize("dtype,nelem", list(_gen_params())) def test_cumsum(dtype, nelem): if dtype == np.int8: # to keep data in range data = gen_rand(dtype, nelem, low=-2, high=2) else: data = gen_rand(dtype, nelem) decimal = 4 if dtype == np.float32 else 6 # series gs = cudf.Series(data) ps = pd.Series(data) np.testing.assert_array_almost_equal( gs.cumsum().to_numpy(), ps.cumsum(), decimal=decimal ) # dataframe series (named series) gdf = cudf.DataFrame() gdf["a"] = cudf.Series(data) pdf = pd.DataFrame() pdf["a"] = pd.Series(data) np.testing.assert_array_almost_equal( gdf.a.cumsum().to_numpy(), pdf.a.cumsum(), decimal=decimal ) def test_cumsum_masked(): data = [1, 2, None, 4, 5] float_types = ["float32", "float64"] for type_ in float_types: gs = cudf.Series(data).astype(type_) ps = pd.Series(data).astype(type_) assert_eq(gs.cumsum(), ps.cumsum()) for type_ in INTEGER_TYPES: gs = cudf.Series(data).astype(type_) got = gs.cumsum() expected = pd.Series([1, 3, np.nan, 7, 12], dtype="float64") assert_eq(got, expected) @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(8, 4), Decimal64Dtype(10, 5), Decimal64Dtype(12, 7), Decimal32Dtype(8, 5), Decimal128Dtype(13, 6), ], ) def test_cumsum_decimal(dtype): data = ["243.32", "48.245", "-7234.298", np.nan, "-467.2"] gser = cudf.Series(data).astype(dtype) pser = pd.Series(data, dtype="float64") got = gser.cumsum() expected = cudf.Series.from_pandas(pser.cumsum()).astype(dtype) assert_eq(got, expected) @pytest.mark.parametrize("dtype,nelem", list(_gen_params())) def test_cummin(dtype, nelem): if dtype == np.int8: # to keep data in range data = gen_rand(dtype, nelem, low=-2, high=2) else: data = gen_rand(dtype, nelem) decimal = 4 if dtype == np.float32 else 6 # series gs = cudf.Series(data) ps = pd.Series(data) np.testing.assert_array_almost_equal( gs.cummin().to_numpy(), ps.cummin(), decimal=decimal ) # dataframe series (named series) gdf = cudf.DataFrame() gdf["a"] = cudf.Series(data) pdf = pd.DataFrame() pdf["a"] = pd.Series(data) np.testing.assert_array_almost_equal( gdf.a.cummin().to_numpy(), pdf.a.cummin(), decimal=decimal ) def test_cummin_masked(): data = [1, 2, None, 4, 5] float_types = ["float32", "float64"] for type_ in float_types: gs = cudf.Series(data).astype(type_) ps = pd.Series(data).astype(type_) assert_eq(gs.cummin(), ps.cummin()) for type_ in INTEGER_TYPES: gs = cudf.Series(data).astype(type_) expected = pd.Series([1, 1, np.nan, 1, 1]).astype("float64") assert_eq(gs.cummin(), expected) @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(8, 4), Decimal64Dtype(11, 6), Decimal64Dtype(14, 7), Decimal32Dtype(8, 4), Decimal128Dtype(11, 6), ], ) def test_cummin_decimal(dtype): data = ["8394.294", np.nan, "-9940.444", np.nan, "-23.928"] gser = cudf.Series(data).astype(dtype) pser = pd.Series(data, dtype="float64") got = gser.cummin() expected = cudf.Series.from_pandas(pser.cummin()).astype(dtype) assert_eq(got, expected) @pytest.mark.parametrize("dtype,nelem", list(_gen_params())) def test_cummax(dtype, nelem): if dtype == np.int8: # to keep data in 
range data = gen_rand(dtype, nelem, low=-2, high=2) else: data = gen_rand(dtype, nelem) decimal = 4 if dtype == np.float32 else 6 # series gs = cudf.Series(data) ps = pd.Series(data) np.testing.assert_array_almost_equal( gs.cummax().to_numpy(), ps.cummax(), decimal=decimal ) # dataframe series (named series) gdf = cudf.DataFrame() gdf["a"] = cudf.Series(data) pdf = pd.DataFrame() pdf["a"] = pd.Series(data) np.testing.assert_array_almost_equal( gdf.a.cummax().to_numpy(), pdf.a.cummax(), decimal=decimal ) def test_cummax_masked(): data = [1, 2, None, 4, 5] float_types = ["float32", "float64"] for type_ in float_types: gs = cudf.Series(data).astype(type_) ps = pd.Series(data).astype(type_) assert_eq(gs.cummax(), ps.cummax()) for type_ in INTEGER_TYPES: gs = cudf.Series(data).astype(type_) expected = pd.Series([1, 2, np.nan, 4, 5]).astype("float64") assert_eq(gs.cummax(), expected) @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(8, 4), Decimal64Dtype(11, 6), Decimal64Dtype(14, 7), Decimal32Dtype(8, 4), Decimal128Dtype(11, 6), ], ) def test_cummax_decimal(dtype): data = [np.nan, "54.203", "8.222", "644.32", "-562.272"] gser = cudf.Series(data).astype(dtype) pser = pd.Series(data, dtype="float64") got = gser.cummax() expected = cudf.Series.from_pandas(pser.cummax()).astype(dtype) assert_eq(got, expected) @pytest.mark.parametrize("dtype,nelem", list(_gen_params())) def test_cumprod(dtype, nelem): if dtype == np.int8: # to keep data in range data = gen_rand(dtype, nelem, low=-2, high=2) else: data = gen_rand(dtype, nelem) decimal = 4 if dtype == np.float32 else 6 # series gs = cudf.Series(data) ps = pd.Series(data) np.testing.assert_array_almost_equal( gs.cumprod().to_numpy(), ps.cumprod(), decimal=decimal ) # dataframe series (named series) gdf = cudf.DataFrame() gdf["a"] = cudf.Series(data) pdf = pd.DataFrame() pdf["a"] = pd.Series(data) np.testing.assert_array_almost_equal( gdf.a.cumprod().to_numpy(), pdf.a.cumprod(), decimal=decimal ) def test_cumprod_masked(): data = [1, 2, None, 4, 5] float_types = ["float32", "float64"] for type_ in float_types: gs = cudf.Series(data).astype(type_) ps = pd.Series(data).astype(type_) assert_eq(gs.cumprod(), ps.cumprod()) for type_ in INTEGER_TYPES: gs = cudf.Series(data).astype(type_) got = gs.cumprod() expected = pd.Series([1, 2, np.nan, 8, 40], dtype="float64") assert_eq(got, expected) def test_scan_boolean_cumsum(): s = cudf.Series([0, -1, -300, 23, 4, -3, 0, 0, 100]) # cumsum test got = (s > 0).cumsum() expect = (s > 0).to_pandas().cumsum() assert_eq(expect, got) def test_scan_boolean_cumprod(): s = cudf.Series([0, -1, -300, 23, 4, -3, 0, 0, 100]) # cumprod test got = (s > 0).cumprod() expect = (s > 0).to_pandas().cumprod() assert_eq(expect, got)
0
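# --- Illustrative sketch (not part of the test file above) ---
# The masked-scan tests above check that nulls propagate through cumulative
# ops instead of being treated as identity elements. A minimal example using
# the cudf API exercised above:
import cudf

s = cudf.Series([1, 2, None, 4])
print(s.cumsum())   # 1, 3, <NA>, 7
print(s.cumprod())  # 1, 2, <NA>, 8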
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_offset.py
# Copyright (c) 2021, NVIDIA CORPORATION. import re import numpy as np import pytest from cudf import DateOffset INT64MAX = np.iinfo("int64").max @pytest.mark.parametrize("period", [1.5, 0.5, "string", "1", "1.0"]) @pytest.mark.parametrize("freq", ["years", "months"]) def test_construction_invalid(period, freq): kwargs = {freq: period} with pytest.raises(ValueError): DateOffset(**kwargs) @pytest.mark.parametrize( "unit", ["nanoseconds", "microseconds", "milliseconds", "seconds"] ) def test_construct_max_offset(unit): DateOffset(**{unit: np.iinfo("int64").max}) @pytest.mark.parametrize( "kwargs", [ {"seconds": INT64MAX + 1}, {"seconds": INT64MAX, "minutes": 1}, {"minutes": INT64MAX}, ], ) def test_offset_construction_overflow(kwargs): with pytest.raises(NotImplementedError): DateOffset(**kwargs) @pytest.mark.parametrize( "unit", [ "years", "months", "weeks", "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds", ], ) @pytest.mark.parametrize("period", [0.5, -0.5, 0.71]) def test_offset_no_fractional_periods(unit, period): with pytest.raises( ValueError, match=re.escape("Non-integer periods not supported") ): DateOffset(**{unit: period})
0
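# --- Illustrative sketch (not part of the test file above) ---
# DateOffset accepts only integral periods and rejects component sums that
# would overflow int64 nanoseconds, as the tests above assert. For example:
import cudf

cudf.DateOffset(months=3)  # integral periods construct fine
try:
    cudf.DateOffset(months=1.5)  # fractional periods raise ValueError
except ValueError as err:
    print(err)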
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_setitem.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import numpy as np import pandas as pd import pytest import cudf from cudf.core._compat import PANDAS_GE_150 from cudf.testing._utils import assert_eq, assert_exceptions_equal @pytest.mark.parametrize("df", [pd.DataFrame({"a": [1, 2, 3]})]) @pytest.mark.parametrize("arg", [[True, False, True], [True, True, True]]) @pytest.mark.parametrize("value", [0, -1]) def test_dataframe_setitem_bool_mask_scalar(df, arg, value): gdf = cudf.from_pandas(df) df[arg] = value gdf[arg] = value assert_eq(df, gdf) def test_dataframe_setitem_scalar_bool(): df = pd.DataFrame({"a": [1, 2, 3]}) df[[True, False, True]] = pd.DataFrame({"a": [-1, -2]}) gdf = cudf.DataFrame({"a": [1, 2, 3]}) gdf[[True, False, True]] = cudf.DataFrame({"a": [-1, -2]}) assert_eq(df, gdf) @pytest.mark.parametrize( "df", [pd.DataFrame({"a": [1, 2, 3]}), pd.DataFrame({"a": ["x", "y", "z"]})], ) @pytest.mark.parametrize("arg", [["a"], "a", "b"]) @pytest.mark.parametrize( "value", [-10, pd.DataFrame({"a": [-1, -2, -3]}), "abc"] ) def test_dataframe_setitem_columns(df, arg, value): gdf = cudf.from_pandas(df) cudf_replace_value = value if isinstance(cudf_replace_value, pd.DataFrame): cudf_replace_value = cudf.from_pandas(value) df[arg] = value gdf[arg] = cudf_replace_value assert_eq(df, gdf, check_dtype=False) @pytest.mark.parametrize("df", [pd.DataFrame({"a": [1, 2, 3]})]) @pytest.mark.parametrize("arg", [["b", "c"]]) @pytest.mark.parametrize( "value", [ pd.DataFrame({"0": [-1, -2, -3], "1": [-0, -10, -1]}), 10, 20, 30, "rapids", "ai", 0.32234, np.datetime64(1324232423423342, "ns"), np.timedelta64(34234324234324234, "ns"), ], ) def test_dataframe_setitem_new_columns(df, arg, value): gdf = cudf.from_pandas(df) cudf_replace_value = value if isinstance(cudf_replace_value, pd.DataFrame): cudf_replace_value = cudf.from_pandas(value) df[arg] = value gdf[arg] = cudf_replace_value assert_eq(df, gdf, check_dtype=True) # set_item_series inconsistency def test_series_setitem_index(): df = pd.DataFrame( data={"b": [-1, -2, -3], "c": [1, 2, 3]}, index=[1, 2, 3] ) df["b"] = pd.Series(data=[12, 11, 10], index=[3, 2, 1]) gdf = cudf.DataFrame( data={"b": [-1, -2, -3], "c": [1, 2, 3]}, index=[1, 2, 3] ) gdf["b"] = cudf.Series(data=[12, 11, 10], index=[3, 2, 1]) assert_eq(df, gdf, check_dtype=False) @pytest.mark.parametrize("psr", [pd.Series([1, 2, 3], index=["a", "b", "c"])]) @pytest.mark.parametrize( "arg", ["b", ["a", "c"], slice(1, 2, 1), [True, False, True]] ) def test_series_set_item(psr, arg): gsr = cudf.from_pandas(psr) psr[arg] = 11 gsr[arg] = 11 assert_eq(psr, gsr) def test_series_setitem_singleton_range(): sr = cudf.Series([1, 2, 3], dtype=np.int64) psr = sr.to_pandas() value = np.asarray([7], dtype=np.int64) sr.iloc[:1] = value psr.iloc[:1] = value assert_eq(sr, cudf.Series([7, 2, 3], dtype=np.int64)) assert_eq(sr, psr, check_dtype=True) @pytest.mark.parametrize( "df", [ pd.DataFrame( {"a": [1, 2, 3]}, index=pd.MultiIndex.from_frame( pd.DataFrame({"b": [3, 2, 1], "c": ["a", "b", "c"]}) ), ), pd.DataFrame({"a": [1, 2, 3]}, index=["a", "b", "c"]), ], ) def test_setitem_dataframe_series_inplace(df): pdf = df.copy(deep=True) gdf = cudf.from_pandas(pdf) pdf["a"].replace(1, 500, inplace=True) gdf["a"].replace(1, 500, inplace=True) assert_eq(pdf, gdf) psr_a = pdf["a"] gsr_a = gdf["a"] psr_a.replace(500, 501, inplace=True) gsr_a.replace(500, 501, inplace=True) assert_eq(pdf, gdf) @pytest.mark.parametrize( "replace_data", [ [100, 200, 300, 400, 500], cudf.Series([100, 200, 300, 400, 500]), cudf.Series([100, 200,
300, 400, 500], index=[2, 3, 4, 5, 6]), ], ) def test_series_set_equal_length_object_by_mask(replace_data): psr = pd.Series([1, 2, 3, 4, 5], dtype="Int64") gsr = cudf.from_pandas(psr) # Lengths match in trivial case pd_bool_col = pd.Series([True] * len(psr), dtype="boolean") gd_bool_col = cudf.from_pandas(pd_bool_col) psr[pd_bool_col] = ( replace_data.to_pandas(nullable=True) if hasattr(replace_data, "to_pandas") else pd.Series(replace_data) ) gsr[gd_bool_col] = replace_data assert_eq(psr.astype("float"), gsr.astype("float")) # Test partial masking psr[psr > 1] = ( replace_data.to_pandas() if hasattr(replace_data, "to_pandas") else pd.Series(replace_data) ) gsr[gsr > 1] = replace_data assert_eq(psr.astype("float"), gsr.astype("float")) def test_column_set_equal_length_object_by_mask(): # Series.__setitem__ might bypass some of the cases # handled in column.__setitem__ so this test is needed data = cudf.Series([0, 0, 1, 1, 1])._column replace_data = cudf.Series([100, 200, 300, 400, 500])._column bool_col = cudf.Series([True, True, True, True, True])._column data[bool_col] = replace_data assert_eq(cudf.Series(data), cudf.Series(replace_data)) data = cudf.Series([0, 0, 1, 1, 1])._column bool_col = cudf.Series([True, False, True, False, True])._column data[bool_col] = replace_data assert_eq(cudf.Series(data), cudf.Series([100, 0, 300, 1, 500])) def test_column_set_unequal_length_object_by_mask(): data = [1, 2, 3, 4, 5] replace_data_1 = [8, 9] replace_data_2 = [8, 9, 10, 11] mask = [True, True, False, True, False] psr = pd.Series(data) gsr = cudf.Series(data) assert_exceptions_equal( psr.__setitem__, gsr.__setitem__, ([mask, replace_data_1], {}), ([mask, replace_data_1], {}), ) psr = pd.Series(data) gsr = cudf.Series(data) assert_exceptions_equal( psr.__setitem__, gsr.__setitem__, ([mask, replace_data_2], {}), ([mask, replace_data_2], {}), ) def test_categorical_setitem_invalid(): ps = pd.Series([1, 2, 3], dtype="category") gs = cudf.Series([1, 2, 3], dtype="category") if PANDAS_GE_150: assert_exceptions_equal( lfunc=ps.__setitem__, rfunc=gs.__setitem__, lfunc_args_and_kwargs=([0, 5], {}), rfunc_args_and_kwargs=([0, 5], {}), ) else: # Following workaround is needed because: # https://github.com/pandas-dev/pandas/issues/46646 with pytest.raises( ValueError, match="Cannot setitem on a Categorical with a new category, set " "the categories first", ): gs[0] = 5 def test_series_slice_setitem_list(): actual = cudf.Series([[[1, 2], [2, 3]], [[3, 4]], [[4, 5]], [[6, 7]]]) actual[slice(0, 3, 1)] = [[10, 11], [12, 23]] expected = cudf.Series( [ [[10, 11], [12, 23]], [[10, 11], [12, 23]], [[10, 11], [12, 23]], [[6, 7]], ] ) assert_eq(actual, expected) actual = cudf.Series([[[1, 2], [2, 3]], [[3, 4]], [[4, 5]], [[6, 7]]]) actual[0:3] = cudf.Scalar([[10, 11], [12, 23]]) assert_eq(actual, expected) def test_series_slice_setitem_struct(): actual = cudf.Series( [ {"a": {"b": 10}, "b": 11}, {"a": {"b": 100}, "b": 5}, {"a": {"b": 50}, "b": 2}, {"a": {"b": 1000}, "b": 67}, {"a": {"b": 4000}, "b": 1090}, ] ) actual[slice(0, 3, 1)] = {"a": {"b": 5050}, "b": 101} expected = cudf.Series( [ {"a": {"b": 5050}, "b": 101}, {"a": {"b": 5050}, "b": 101}, {"a": {"b": 5050}, "b": 101}, {"a": {"b": 1000}, "b": 67}, {"a": {"b": 4000}, "b": 1090}, ] ) assert_eq(actual, expected) actual = cudf.Series( [ {"a": {"b": 10}, "b": 11}, {"a": {"b": 100}, "b": 5}, {"a": {"b": 50}, "b": 2}, {"a": {"b": 1000}, "b": 67}, {"a": {"b": 4000}, "b": 1090}, ] ) actual[0:3] = cudf.Scalar({"a": {"b": 5050}, "b": 101}) assert_eq(actual, expected) 
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64]) @pytest.mark.parametrize("indices", [0, [1, 2]]) def test_series_setitem_upcasting(dtype, indices): sr = pd.Series([0, 0, 0], dtype=dtype) cr = cudf.from_pandas(sr) assert_eq(sr, cr) # Must be a non-integral floating point value that can't be losslessly # converted to float32, otherwise pandas will try and match the source # column dtype. new_value = np.float64(np.pi) col_ref = cr._column sr[indices] = new_value cr[indices] = new_value if PANDAS_GE_150: assert_eq(sr, cr) else: # pandas bug, incorrectly fails to upcast from float32 to float64 assert_eq(sr.values, cr.values) if dtype == np.float64: # no-op type cast should not modify backing column assert col_ref == cr._column # TODO: these two tests could perhaps be changed once specifics of # pandas compat wrt upcasting are decided on; this is just baking in # status-quo. def test_series_setitem_upcasting_string_column(): sr = pd.Series([0, 0, 0], dtype=str) cr = cudf.from_pandas(sr) new_value = np.float64(10.5) sr[0] = str(new_value) cr[0] = new_value assert_eq(sr, cr) def test_series_setitem_upcasting_string_value(): sr = cudf.Series([0, 0, 0], dtype=int) # This is a distinction with pandas, which lets you instead make an # object column with ["10", 0, 0] sr[0] = "10" assert_eq(pd.Series([10, 0, 0], dtype=int), sr) with pytest.raises(ValueError): sr[0] = "non-integer" def test_scatter_by_slice_with_start_and_step(): source = pd.Series([1, 2, 3, 4, 5]) csource = cudf.from_pandas(source) target = pd.Series([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) ctarget = cudf.from_pandas(target) target[1::2] = source ctarget[1::2] = csource assert_eq(target, ctarget) @pytest.mark.parametrize("n", [1, 3]) def test_setitem_str_trailing_null(n): trailing_nulls = "\x00" * n s = cudf.Series(["a", "b", "c" + trailing_nulls]) assert s[2] == "c" + trailing_nulls s[0] = "a" + trailing_nulls assert s[0] == "a" + trailing_nulls s[1] = trailing_nulls assert s[1] == trailing_nulls s[0] = "" assert s[0] == "" s[0] = "\x00" assert s[0] == "\x00" @pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/7448") def test_iloc_setitem_7448(): index = pd.MultiIndex.from_product([(1, 2), (3, 4)]) expect = cudf.Series([1, 2, 3, 4], index=index) actual = cudf.from_pandas(expect) expect[(1, 3)] = 101 actual[(1, 3)] = 101 assert_eq(expect, actual) @pytest.mark.parametrize( "value", [ "7", pytest.param( ["7", "8"], marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/11298" ), ), ], ) def test_loc_setitem_string_11298(value): df = pd.DataFrame({"a": ["a", "b", "c"]}) cdf = cudf.from_pandas(df) df.loc[:1, "a"] = value cdf.loc[:1, "a"] = value assert_eq(df, cdf) @pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/11944") def test_loc_setitem_list_11944(): df = pd.DataFrame( data={"a": ["yes", "no"], "b": [["l1", "l2"], ["c", "d"]]} ) cdf = cudf.from_pandas(df) df.loc[df.a == "yes", "b"] = [["hello"]] cdf.loc[df.a == "yes", "b"] = [["hello"]] assert_eq(df, cdf) @pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/12504") def test_loc_setitem_extend_empty_12504(): df = pd.DataFrame(columns=["a"]) cdf = cudf.from_pandas(df) df.loc[0] = [1] cdf.loc[0] = [1] assert_eq(df, cdf) @pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/12505") def test_loc_setitem_extend_existing_12505(): df = pd.DataFrame({"a": [0]}) cdf = cudf.from_pandas(df) df.loc[1] = 1 cdf.loc[1] = 1 assert_eq(df, cdf) 
@pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/12801") def test_loc_setitem_add_column_partial_12801(): df = pd.DataFrame({"a": [0, 1, 2]}) cdf = cudf.from_pandas(df) df.loc[df.a < 2, "b"] = 1 cdf.loc[cdf.a < 2, "b"] = 1 assert_eq(df, cdf) @pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/13031") @pytest.mark.parametrize("other_index", [["1", "3", "2"], [1, 2, 3]]) def test_loc_setitem_series_index_alignment_13031(other_index): s = pd.Series([1, 2, 3], index=["1", "2", "3"]) other = pd.Series([5, 6, 7], index=other_index) cs = cudf.from_pandas(s) cother = cudf.from_pandas(other) s.loc[["1", "3"]] = other cs.loc[["1", "3"]] = cother assert_eq(s, cs) @pytest.mark.parametrize( "ps", [ pd.Series([1, 2, 3], index=pd.RangeIndex(0, 3)), pd.Series([1, 2, 3], index=pd.RangeIndex(start=2, stop=-1, step=-1)), pd.Series([1, 2, 3], index=pd.RangeIndex(start=1, stop=6, step=2)), pd.Series( [1, 2, 3, 4, 5], index=pd.RangeIndex(start=1, stop=-9, step=-2) ), pd.Series( [1, 2, 3, 4, 5], index=pd.RangeIndex(start=1, stop=-12, step=-3) ), pd.Series([1, 2, 3, 4], index=pd.RangeIndex(start=1, stop=14, step=4)), pd.Series( [1, 2, 3, 4], index=pd.RangeIndex(start=1, stop=-14, step=-4) ), ], ) @pytest.mark.parametrize("arg", list(range(-20, 20)) + [5.6, 3.1]) def test_series_set_item_range_index(ps, arg): gsr = cudf.from_pandas(ps) psr = ps.copy(deep=True) psr[arg] = 11 gsr[arg] = 11 assert_eq(psr, gsr, check_index_type=True) def test_series_set_item_index_reference(): gs1 = cudf.Series([1], index=[7]) gs2 = cudf.Series([2], index=gs1.index) gs1.loc[11] = 2 ps1 = pd.Series([1], index=[7]) ps2 = pd.Series([2], index=ps1.index) ps1.loc[11] = 2 assert_eq(ps1, gs1) assert_eq(ps2, gs2)
0
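# --- Illustrative sketch (not part of the test file above) ---
# Two behaviors the setitem tests above pin down: boolean-mask assignment
# replaces only the masked rows, and assigning a non-integral float into an
# integer Series upcasts the column (matching pandas). A minimal example:
import numpy as np

import cudf

s = cudf.Series([1, 2, 3])
s[cudf.Series([True, False, True])] = 0
print(s)  # 0, 2, 0

s2 = cudf.Series([0, 0, 0], dtype="int64")
s2[0] = np.float64(1.5)  # non-integral float forces an upcast
print(s2.dtype)  # expected: float64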
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_csv.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import codecs import gzip import os import re import shutil from collections import OrderedDict from io import BytesIO, StringIO from pathlib import Path import cupy as cp import numpy as np import pandas as pd import pytest from pyarrow import fs as pa_fs import cudf from cudf import read_csv from cudf.core._compat import PANDAS_LT_140 from cudf.testing._utils import assert_eq, assert_exceptions_equal def make_numeric_dataframe(nrows, dtype): df = pd.DataFrame() df["col1"] = np.arange(nrows, dtype=dtype) df["col2"] = np.arange(1, 1 + nrows, dtype=dtype) return df def make_datetime_dataframe(include_non_standard=False): df = pd.DataFrame() df["col1"] = np.array( [ "31/10/2010", "05/03/2001", "20/10/1994", "18/10/1990", "1/1/1970", "2016-04-30T01:02:03.000", "2038-01-19 03:14:07", ] ) df["col2"] = np.array( [ "18/04/1995", "14 / 07 / 1994", "07/06/2006", "16/09/2005", "2/2/1970", "2007-4-30 1:6:40.000PM", "2038-01-19 03:14:08", ] ) if include_non_standard: # Last column contains non-standard date formats df["col3"] = np.array( [ "1 Jan", "2 January 1994", "Feb 2002", "31-01-2000", "1-1-1996", "15-May-2009", "21-Dec-3262", ] ) return df def make_numpy_mixed_dataframe(): df = pd.DataFrame() df["Integer"] = np.array([2345, 11987, 9027, 9027]) df["Date"] = np.array( ["18/04/1995", "14/07/1994", "07/06/2006", "16/09/2005"] ) df["Float"] = np.array([9.001, 8.343, 6, 2.781]) df["Integer2"] = np.array([2345, 106, 2088, 789277]) df["Category"] = np.array(["M", "F", "F", "F"]) df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"]) df["Boolean"] = np.array([True, False, True, False]) return df @pytest.fixture def pd_mixed_dataframe(): return make_numpy_mixed_dataframe() @pytest.fixture def cudf_mixed_dataframe(): return cudf.from_pandas(make_numpy_mixed_dataframe()) def make_all_numeric_dataframe(): df = pd.DataFrame() gdf_dtypes = [ "float", "float32", "double", "float64", "int8", "short", "int16", "int", "int32", "long", "int64", "uint8", "uint16", "uint32", "uint64", ] np_dtypes = [ np.float32, np.float32, np.float64, np.float64, np.int8, np.int16, np.int16, np.int32, np.int32, np.int64, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, ] for i in range(len(gdf_dtypes)): df[gdf_dtypes[i]] = np.arange(10, dtype=np_dtypes[i]) return ( df, OrderedDict(zip(gdf_dtypes, gdf_dtypes)), OrderedDict(zip(gdf_dtypes, np_dtypes)), ) def make_all_numeric_extremes_dataframe(): # integers 0,+1,-1,min,max # float 0.0, -0.0,+1,-1,min,max, nan, esp, espneg, tiny, [-ve values] df, gdf_dtypes, pdf_dtypes = make_all_numeric_dataframe() df = pd.DataFrame() for gdf_dtype in gdf_dtypes: np_type = pdf_dtypes[gdf_dtype] if np.issubdtype(np_type, np.integer): itype = np.iinfo(np_type) extremes = [0, +1, -1, itype.min, itype.max] df[gdf_dtype] = np.array(extremes * 4).astype(np_type)[:20] else: ftype = np.finfo(np_type) extremes = [ 0.0, -0.0, +1, -1, np.nan, -np.nan, # ftype.min, # TODO enable after fixing truncation issue #6235 # ftype.max, # TODO enable after fixing truncation issue #6235 np_type(np.inf), -np_type(np.inf), ftype.eps, ftype.epsneg, ftype.tiny, -ftype.eps, -ftype.epsneg, -ftype.tiny, ] df[gdf_dtype] = np.array(extremes * 4, dtype=np_type)[:20] return ( df, gdf_dtypes, pdf_dtypes, ) @pytest.fixture def pandas_extreme_numeric_dataframe(): return make_all_numeric_extremes_dataframe()[0] @pytest.fixture def cudf_extreme_numeric_dataframe(pandas_extreme_numeric_dataframe): return cudf.from_pandas(pandas_extreme_numeric_dataframe) @pytest.fixture def 
path_or_buf(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_path_or_buf.csv") df = make_numeric_dataframe(10, np.int32) df.to_csv(fname, index=False, header=False) buffer = df.to_csv(index=False, header=False) def _make_path_or_buf(src): if src == "filepath": return str(fname) if src == "pathobj": return fname if src == "bytes_io": return BytesIO(buffer.encode()) if src == "string_io": return StringIO(buffer) if src == "url": return Path(fname).as_uri() raise ValueError("Invalid source type") yield _make_path_or_buf dtypes = [np.float64, np.float32, np.int64, np.int32, np.uint64, np.uint32] dtypes_dict = {"1": np.float64, "2": np.float32, "3": np.int64, "4": np.int32} nelem = [5, 25, 100] @pytest.mark.parametrize("dtype", dtypes) @pytest.mark.parametrize("nelem", nelem) def test_csv_reader_numeric_data(dtype, nelem, tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file1.csv") df = make_numeric_dataframe(nelem, dtype) df.to_csv(fname, index=False, header=False) dtypes = [df[k].dtype for k in df.columns] out = read_csv(str(fname), names=list(df.columns.values), dtype=dtypes) assert len(out.columns) == len(df.columns) assert_eq(df, out) @pytest.mark.parametrize("parse_dates", [["date2"], [0], ["date1", 1, "bad"]]) def test_csv_reader_datetime(parse_dates): df = make_datetime_dataframe(include_non_standard=True) buffer = df.to_csv(index=False, header=False) gdf = read_csv( StringIO(buffer), names=["date1", "date2", "bad"], parse_dates=parse_dates, dayfirst=True, ) pdf = pd.read_csv( StringIO(buffer), names=["date1", "date2", "bad"], parse_dates=parse_dates, dayfirst=True, ) assert_eq(gdf, pdf) @pytest.mark.parametrize("pandas_arg", [{"delimiter": "|"}, {"sep": "|"}]) @pytest.mark.parametrize("cudf_arg", [{"sep": "|"}, {"delimiter": "|"}]) def test_csv_reader_mixed_data_delimiter_sep( tmpdir, pandas_arg, cudf_arg, pd_mixed_dataframe ): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file3.csv") pd_mixed_dataframe.to_csv(fname, sep="|", index=False, header=False) gdf1 = read_csv( str(fname), names=["1", "2", "3", "4", "5", "6", "7"], dtype=["int64", "date", "float64", "int64", "category", "str", "bool"], dayfirst=True, **cudf_arg, ) gdf2 = read_csv( str(fname), names=["1", "2", "3", "4", "5", "6", "7"], dtype=["int64", "date", "float64", "int64", "category", "str", "bool"], dayfirst=True, **pandas_arg, ) pdf = pd.read_csv( fname, names=["1", "2", "3", "4", "5", "6", "7"], parse_dates=[1], dayfirst=True, **pandas_arg, ) assert len(gdf1.columns) == len(pdf.columns) assert len(gdf2.columns) == len(pdf.columns) assert_eq(gdf1, gdf2) @pytest.mark.parametrize("use_list", [False, True]) def test_csv_reader_dtype_list(use_list): df = make_numeric_dataframe(10, dtype=np.float32) buffer = df.to_csv(index=False, header=False) # PANDAS doesn't list but cudf does (treated as implied ordered dict) # Select first column's dtype if non-list; expect the same dtype for all if use_list: dtypes = [df[k].dtype for k in df.columns] else: dtypes = df[df.columns[0]].dtype gdf = read_csv(StringIO(buffer), dtype=dtypes, names=df.columns) assert_eq(gdf, df) @pytest.mark.parametrize("use_names", [False, True]) def test_csv_reader_dtype_dict(use_names): # Save with the column header if not explicitly specifying a list of names df, gdf_dtypes, pdf_dtypes = make_all_numeric_dataframe() buffer = df.to_csv(index=False, header=(not use_names)) dtypes = df.dtypes.to_dict() gdf_names = list(gdf_dtypes.keys()) if use_names else None pdf_names = list(pdf_dtypes.keys()) if use_names else None gdf = 
read_csv(StringIO(buffer), dtype=dtypes, names=gdf_names) pdf = pd.read_csv(StringIO(buffer), dtype=dtypes, names=pdf_names) assert_eq(gdf, pdf) @pytest.mark.filterwarnings("ignore:invalid value encountered in cast") @pytest.mark.parametrize("use_names", [True, False]) def test_csv_reader_dtype_extremes(use_names): # Save with the column header if not explicitly specifying a list of names df, gdf_dtypes, pdf_dtypes = make_all_numeric_extremes_dataframe() buffer = df.to_csv(index=False, header=(not use_names)) dtypes = df.dtypes.to_dict() gdf_names = list(gdf_dtypes.keys()) if use_names else None pdf_names = list(pdf_dtypes.keys()) if use_names else None gdf = read_csv(StringIO(buffer), dtype=dtypes, names=gdf_names) pdf = pd.read_csv(StringIO(buffer), dtype=dtypes, names=pdf_names) assert_eq(gdf, pdf) def test_csv_reader_skiprows_skipfooter(tmpdir, pd_mixed_dataframe): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file5.csv") pd_mixed_dataframe.to_csv( fname, columns=["Integer", "Date", "Float"], index=False, header=False ) # Using engine='python' to eliminate pandas warning of using python engine. df_out = pd.read_csv( fname, names=["1", "2", "3"], parse_dates=[1], dayfirst=True, skiprows=1, skipfooter=1, engine="python", ) out = read_csv( str(fname), names=["1", "2", "3"], dtype=["int64", "date", "float64"], skiprows=1, skipfooter=1, dayfirst=True, ) assert len(out.columns) == len(df_out.columns) assert len(out) == len(df_out) assert_eq(df_out, out) def test_csv_reader_negative_vals(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file6.csv") names = ["0", "1", "2"] dtypes = ["float32", "float32", "float32"] lines = [ ",".join(names), "-181.5060,-185.37000,-3", "-127.6300,-230.54600,-9", ] with open(str(fname), "w") as fp: fp.write("\n".join(lines)) zero = [-181.5060, -127.6300] one = [-185.370, -230.54600] two = [-3, -9] df = read_csv(str(fname), names=names, dtype=dtypes, skiprows=1) np.testing.assert_allclose(zero, df["0"].to_numpy()) np.testing.assert_allclose(one, df["1"].to_numpy()) np.testing.assert_allclose(two, df["2"].to_numpy()) def test_csv_reader_strings(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file7.csv") names = ["text", "int"] dtypes = ["str", "int"] lines = [",".join(names), "a,0", "b,0", "c,0", "d,0"] with open(str(fname), "w") as fp: fp.write("\n".join(lines)) df = read_csv( str(fname), names=names, dtype=dtypes, skiprows=1, decimal=".", thousands="'", ) assert len(df.columns) == 2 assert df["text"].dtype == np.dtype("object") assert df["int"].dtype == np.dtype("int64") assert df["text"][0] == "a" assert df["text"][1] == "b" assert df["text"][2] == "c" assert df["text"][3] == "d" def test_csv_reader_strings_quotechars(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file8.csv") names = ["text", "int"] dtypes = ["str", "int"] lines = [",".join(names), '"a,\n",0', '"b ""c"" d",0', "e,0", '"f,,!.,",0'] with open(str(fname), "w") as fp: fp.write("\n".join(lines)) df = read_csv( str(fname), names=names, dtype=dtypes, skiprows=1, quotechar='"', quoting=1, ) assert len(df.columns) == 2 assert df["text"].dtype == np.dtype("object") assert df["int"].dtype == np.dtype("int64") assert df["text"][0] == "a,\n" assert df["text"][1] == 'b "c" d' assert df["text"][2] == "e" assert df["text"][3] == "f,,!.," def test_csv_reader_usecols_int_char(tmpdir, pd_mixed_dataframe): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file10.csv") pd_mixed_dataframe.to_csv( fname, columns=["Integer", "Date", "Float", "Integer2"], index=False, 
header=False, ) df_out = pd.read_csv(fname, usecols=[0, 1, 3]) out = read_csv(fname, usecols=[0, 1, 3]) assert len(out.columns) == len(df_out.columns) assert len(out) == len(df_out) assert_eq(df_out, out, check_names=False) @pytest.mark.parametrize( "buffer", [ "abc,ABC,abc,abcd,abc\n1,2,3,4,5\n", "A,A,A.1,A,A.2,A,A.4,A,A\n1,2,3.1,4,a.2,a,a.4,a,a", "A,A,A.1,,Unnamed: 4,A,A.4,A,A\n1,2,3.1,4,a.2,a,a.4,a,a", ], ) @pytest.mark.parametrize("mangle_dupe_cols", [True, False]) def test_csv_reader_mangle_dupe_cols(tmpdir, buffer, mangle_dupe_cols): # Default: mangle_dupe_cols=True cu_df = read_csv(StringIO(buffer), mangle_dupe_cols=mangle_dupe_cols) if mangle_dupe_cols: pd_df = pd.read_csv(StringIO(buffer)) else: # Pandas does not support mangle_dupe_cols=False head = buffer.split("\n")[0].split(",") first_cols = np.unique(head, return_index=True)[1] pd_df = pd.read_csv(StringIO(buffer), usecols=first_cols) assert_eq(cu_df, pd_df) def test_csv_reader_float_decimal(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file12.csv") names = ["basic_32", "basic_64", "round", "decimal_only", "precision"] dtypes = ["float32", "float64", "float64", "float32", "float64"] lines = [ ";".join(names), "1,2;1234,5678;12345;0,123;-73,98007199999998", "3,4;3456,7890;67890;,456;1,7976931348623157e+307", "5,6e0;0,5679e2;1,2e10;0,07e-001;0,0", ] with open(str(fname), "w") as fp: fp.write("\n".join(lines)) basic_32_ref = [1.2, 3.4, 5.6] basic_64_ref = [1234.5678, 3456.7890, 56.79] round_ref = [12345, 67890, 12000000000] decimal_only_ref = [0.123, 0.456, 0.007] precision_ref = [-73.98007199999998, 1.7976931348623157e307, 0.0] df = read_csv( str(fname), names=names, dtype=dtypes, skiprows=1, delimiter=";", decimal=",", ) np.testing.assert_allclose(basic_32_ref, df["basic_32"].to_numpy()) np.testing.assert_allclose(basic_64_ref, df["basic_64"].to_numpy()) np.testing.assert_allclose(round_ref, df["round"].to_numpy()) np.testing.assert_allclose(decimal_only_ref, df["decimal_only"].to_numpy()) np.testing.assert_allclose(precision_ref, df["precision"].to_numpy()) def test_csv_reader_NaN_values(): names = dtypes = ["float32"] empty_cells = '\n""\n' default_na_cells = ( "#N/A\n#N/A N/A\n#NA\n-1.#IND\n" "-1.#QNAN\n-NaN\n-nan\n1.#IND\n" "1.#QNAN\nN/A\n<NA>\nNA\nNULL\n" "NaN\nn/a\nnan\nnull\n" ) custom_na_cells = "NV_NAN\nNotANumber\n" all_cells = empty_cells + default_na_cells + custom_na_cells custom_na_values = ["NV_NAN", "NotANumber"] # test default NA values. 
empty cells should also yield NaNs gdf = read_csv( StringIO(default_na_cells + empty_cells), names=names, dtype=dtypes ) pdf = pd.read_csv( StringIO(default_na_cells + empty_cells), names=names, dtype=np.float32 ) assert_eq(pdf, gdf) # custom NA values gdf = read_csv( StringIO(all_cells), names=names, dtype=dtypes, na_values=custom_na_values, ) pdf = pd.read_csv( StringIO(all_cells), names=names, dtype=np.float32, na_values=custom_na_values, ) assert_eq(pdf, gdf) # custom NA values gdf = read_csv( StringIO(empty_cells + default_na_cells + "_NAA_\n"), names=names, dtype=dtypes, na_values="_NAA_", ) pdf = pd.read_csv( StringIO(empty_cells + default_na_cells + "_NAA_\n"), names=names, dtype=np.float32, na_values="_NAA_", ) assert_eq(pdf, gdf) # data type detection should evaluate the column to int8 (all nulls) gdf = read_csv( StringIO(all_cells), header=None, na_values=custom_na_values, ) assert gdf.dtypes[0] == "int8" assert all(gdf["0"][idx] is cudf.NA for idx in range(len(gdf["0"]))) # data type detection should evaluate the column to object if some nulls gdf = read_csv(StringIO(all_cells), header=None) assert gdf.dtypes[0] == np.dtype("object") def test_csv_reader_thousands(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file13.csv") names = dtypes = [ "float32", "float64", "int32", "int64", "uint32", "uint64", ] lines = [ ",".join(names), "1'234.5, 1'234.567, 1'234'567, 1'234'567'890,\ 1'234'567, 1'234'567'890", "12'345.6, 123'456.7, 12'345, 123'456'789, 12'345, 123'456'789", ] with open(str(fname), "w") as fp: fp.write("\n".join(lines)) f32_ref = [1234.5, 12345.6] f64_ref = [1234.567, 123456.7] int32_ref = [1234567, 12345] int64_ref = [1234567890, 123456789] uint32_ref = [1234567, 12345] uint64_ref = [1234567890, 123456789] df = read_csv( str(fname), names=names, dtype=dtypes, skiprows=1, thousands="'" ) np.testing.assert_allclose(f32_ref, df["float32"].to_numpy()) np.testing.assert_allclose(f64_ref, df["float64"].to_numpy()) np.testing.assert_allclose(int32_ref, df["int32"].to_numpy()) np.testing.assert_allclose(int64_ref, df["int64"].to_numpy()) np.testing.assert_allclose(uint32_ref, df["uint32"].to_numpy()) np.testing.assert_allclose(uint64_ref, df["uint64"].to_numpy()) def test_csv_reader_buffer_strings(): names = ["text", "int"] dtypes = ["str", "int"] lines = [",".join(names), "a,0", "b,0", "c,0", "d,0"] buffer = "\n".join(lines) df = read_csv(StringIO(buffer), names=names, dtype=dtypes, skiprows=1) assert len(df.columns) == 2 assert df["text"].dtype == np.dtype("object") assert df["int"].dtype == np.dtype("int64") assert df["text"][0] == "a" assert df["text"][1] == "b" assert df["text"][2] == "c" assert df["text"][3] == "d" df2 = read_csv( BytesIO(str.encode(buffer)), names=names, dtype=dtypes, skiprows=1 ) assert len(df2.columns) == 2 assert df2["text"].dtype == np.dtype("object") assert df2["int"].dtype == np.dtype("int64") assert df2["text"][0] == "a" assert df2["text"][1] == "b" assert df2["text"][2] == "c" assert df2["text"][3] == "d" @pytest.mark.parametrize( "ext, out_comp, in_comp", [ (".geez", "gzip", "gzip"), (".beez", "bz2", "bz2"), (".gz", "gzip", "infer"), (".bz2", "bz2", "infer"), (".beez", "bz2", np.str_("bz2")), (".data", None, "infer"), (".txt", None, None), ("", None, None), ], ) def test_csv_reader_compression( tmpdir, ext, out_comp, in_comp, pd_mixed_dataframe ): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_compression" + ext) df = pd_mixed_dataframe df.to_csv(fname, index=False, header=False, compression=out_comp) gdf = read_csv(fname, 
names=list(df.columns.values), compression=in_comp) pdf = pd.read_csv( fname, names=list(df.columns.values), compression=in_comp ) assert_eq(gdf, pdf) @pytest.mark.parametrize( "names, dtypes, data, trues, falses", [ ( ["A", "B"], ["bool", "bool"], "True,True\nFalse,False\nTrue,False", None, None, ), ( ["A", "B"], ["int32", "int32"], "True,1\nFalse,2\nTrue,3", None, None, ), ( ["A", "B"], ["int32", "int32"], "YES,1\nno,2\nyes,3\nNo,4\nYes,5", ["yes", "Yes", "YES"], ["no", "NO", "No"], ), (["A", "B"], ["int32", "int32"], "foo,bar\nbar,foo", ["foo"], ["bar"]), (["x", "y"], None, "True,1\nFalse,0", None, None), ], ) def test_csv_reader_bools(tmpdir, names, dtypes, data, trues, falses): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file11.csv") lines = [",".join(names), data] with open(str(fname), "w") as fp: fp.write("\n".join(lines)) # Usage of true_values and false_values makes that column into bool type df_out = pd.read_csv( fname, names=names, skiprows=1, dtype=(dtypes[0] if dtypes else None), true_values=trues, false_values=falses, ) out = read_csv( fname, names=names, dtype=dtypes, skiprows=1, true_values=trues, false_values=falses, ) assert_eq(df_out, out) def test_csv_reader_bools_custom(): names = ["text", "bool"] dtypes = {"text": "str", "bool": "bool"} trues = ["foo", "1"] falses = ["bar", "0"] lines = [ ",".join(names), "true,true", "false,false", "foo,foo", "bar,bar", "0,0", "1,1", ] buffer = "\n".join(lines) df = read_csv( StringIO(buffer), names=names, dtype=dtypes, skiprows=1, true_values=trues, false_values=falses, ) # Note: bool literals give parsing errors as int # "0" and "1" give parsing errors as bool in pandas expected = pd.read_csv( StringIO(buffer), names=names, dtype=dtypes, skiprows=1, true_values=trues, false_values=falses, ) assert_eq(df, expected, check_dtype=True) def test_csv_reader_bools_NA(): names = ["text", "int"] dtypes = ["str", "int"] trues = ["foo"] falses = ["bar"] lines = [ ",".join(names), "true,true", "false,false", "foo,foo", "bar,bar", "qux,qux", ] buffer = "\n".join(lines) df = read_csv( StringIO(buffer), names=names, dtype=dtypes, skiprows=1, true_values=trues, false_values=falses, ) assert len(df.columns) == 2 assert df["text"].dtype == np.dtype("object") assert df["int"].dtype == np.dtype("int64") expected = pd.DataFrame( { "text": ["true", "false", "foo", "bar", "qux"], "int": [1.0, 0.0, 1.0, 0.0, np.nan], } ) assert_eq(df, expected) def test_csv_quotednumbers(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file12.csv") names = ["integer", "decimal"] dtypes = ["int32", "float32"] lines = [ ",".join(names), '1,"3.14"', '"2","300"', '"3",10101.0101', '4,"6.28318"', ] with open(str(fname), "w") as fp: fp.write("\n".join(lines)) integer_ref = [1, 2, 3, 4] decimal_ref = [3.14, 300, 10101.0101, 6.28318] df1 = read_csv(str(fname), names=names, dtype=dtypes, skiprows=1) df2 = read_csv(str(fname), names=names, dtype=dtypes, skiprows=1) assert len(df2.columns) == 2 np.testing.assert_allclose(integer_ref, df1["integer"].to_numpy()) np.testing.assert_allclose(decimal_ref, df1["decimal"].to_numpy()) np.testing.assert_allclose(integer_ref, df2["integer"].to_numpy()) np.testing.assert_allclose(decimal_ref, df2["decimal"].to_numpy()) def test_csv_reader_nrows(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file14.csv") names = ["int1", "int2"] dtypes = ["int32", "int32"] rows = 4000000 read_rows = (rows * 3) // 4 skip_rows = (rows - read_rows) // 2 sample_skip = 1000 with open(str(fname), "w") as fp: fp.write(",".join(names) + 
"\n") for i in range(rows): fp.write(str(i) + ", " + str(2 * i) + " \n") # with specified names df = read_csv( str(fname), names=names, dtype=dtypes, skiprows=skip_rows + 1, nrows=read_rows, ) assert df.shape == (read_rows, 2) for row in range(0, read_rows // sample_skip, sample_skip): assert df["int1"][row] == row + skip_rows assert df["int2"][row] == 2 * (row + skip_rows) assert df["int2"][read_rows - 1] == 2 * (read_rows - 1 + skip_rows) # with column name inference df = read_csv( str(fname), dtype=dtypes, skiprows=skip_rows + 1, nrows=read_rows ) assert df.shape == (read_rows, 2) assert str(skip_rows) in list(df)[0] assert str(2 * skip_rows) in list(df)[1] for row in range(0, read_rows // sample_skip, sample_skip): assert df[list(df)[0]][row] == row + skip_rows + 1 assert df[list(df)[1]][row] == 2 * (row + skip_rows + 1) assert df[list(df)[1]][read_rows - 1] == 2 * (read_rows + skip_rows) # nrows larger than the file df = read_csv(str(fname), dtype=dtypes, nrows=rows * 2) assert df.shape == (rows, 2) for row in range(0, rows // sample_skip, sample_skip): assert df["int1"][row] == row assert df["int2"][row] == 2 * row assert df["int2"][rows - 1] == 2 * (rows - 1) # nrows + skiprows larger than the file df = read_csv( str(fname), dtype=dtypes, nrows=read_rows, skiprows=read_rows ) assert df.shape == (rows - read_rows, 2) # nrows equal to zero df = read_csv(str(fname), dtype=dtypes, nrows=0) assert df.shape == (0, 2) # with both skipfooter and nrows - should throw with pytest.raises(ValueError): read_csv(str(fname), nrows=read_rows, skipfooter=1) def test_csv_reader_gzip_compression_strings(tmpdir): fnamebase = tmpdir.mkdir("gdf_csv") fname = fnamebase.join("tmp_csvreader_file15.csv") fnamez = fnamebase.join("tmp_csvreader_file15.csv.gz") names = ["text", "int"] dtypes = ["str", "int"] lines = [",".join(names), "a,0", "b,0", "c,0", "d,0"] with open(str(fname), "w") as fp: fp.write("\n".join(lines)) with open(str(fname), "rb") as f_in, gzip.open(str(fnamez), "wb") as f_out: shutil.copyfileobj(f_in, f_out) df = read_csv( str(fnamez), names=names, dtype=dtypes, skiprows=1, decimal=".", thousands="'", compression="gzip", ) assert len(df.columns) == 2 assert df["text"].dtype == np.dtype("object") assert df["int"].dtype == np.dtype("int64") assert df["text"][0] == "a" assert df["text"][1] == "b" assert df["text"][2] == "c" assert df["text"][3] == "d" @pytest.mark.parametrize("skip_rows", [0, 2, 4]) @pytest.mark.parametrize("header_row", [0, 2]) def test_csv_reader_skiprows_header(skip_rows, header_row): names = ["float_point", "integer"] dtypes = ["float64", "int64"] lines = [ ",".join(names), "1.2, 1", "2.3, 2", "3.4, 3", "4.5, 4", "5.6, 5", "6.7, 6", ] buffer = "\n".join(lines) cu_df = read_csv( StringIO(buffer), dtype=dtypes, skiprows=skip_rows, header=header_row ) pd_df = pd.read_csv( StringIO(buffer), skiprows=skip_rows, header=header_row ) assert cu_df.shape == pd_df.shape assert list(cu_df.columns.values) == list(pd_df.columns.values) def test_csv_reader_dtype_inference(): names = ["float_point", "integer"] lines = [ ",".join(names), "1.2,1", "2.3,2", "3.4,3", "4.5,4", "5.6,5", "6.7,6", ] buffer = "\n".join(lines) cu_df = read_csv(StringIO(buffer)) pd_df = pd.read_csv(StringIO(buffer)) assert cu_df.shape == pd_df.shape assert list(cu_df.columns.values) == list(pd_df.columns.values) def test_csv_reader_dtype_inference_whitespace(): names = ["float_point", "integer"] lines = [ ",".join(names), " 1.2, 1", "2.3,2 ", " 3.4, 3", " 4.5,4", "5.6, 5", " 6.7,6 ", ] buffer = "\n".join(lines) cu_df 
= read_csv(StringIO(buffer)) pd_df = pd.read_csv(StringIO(buffer)) assert cu_df.shape == pd_df.shape assert list(cu_df.columns.values) == list(pd_df.columns.values) def test_csv_reader_empty_dataframe(): dtypes = ["float64", "int64"] buffer = "float_point, integer" # should work fine with dtypes df = read_csv(StringIO(buffer), dtype=dtypes) assert df.shape == (0, 2) assert all(df.dtypes == ["float64", "int64"]) # should default to string columns without dtypes df = read_csv(StringIO(buffer)) assert df.shape == (0, 2) assert all(df.dtypes == ["object", "object"]) def test_csv_reader_filenotfound(tmpdir): fname = "non-existing-filename.csv" # should raise an error with pytest.raises(FileNotFoundError): read_csv(str(fname)) # should raise an error dname = tmpdir.mkdir("gdf_csv") with pytest.raises(FileNotFoundError): read_csv(str(dname)) @pytest.mark.parametrize( "src", ["filepath", "pathobj", "bytes_io", "string_io", "url"] ) def test_csv_reader_filepath_or_buffer(tmpdir, path_or_buf, src): expect = pd.read_csv(path_or_buf("filepath")) got = cudf.read_csv(path_or_buf(src)) assert_eq(expect, got) def test_csv_reader_arrow_nativefile(path_or_buf): # Check that we can read a file opened with the # Arrow FileSystem interface expect = cudf.read_csv(path_or_buf("filepath")) fs, path = pa_fs.FileSystem.from_uri(path_or_buf("filepath")) with fs.open_input_file(path) as fil: got = cudf.read_csv(fil) assert_eq(expect, got) def test_small_zip(tmpdir): df = pd.DataFrame( { "a": [1997] * 2, "b": ["Ford"] * 2, "c": ["Super, luxurious truck"] * 2, } ) fname = tmpdir.join("small_zip_file.zip") df.to_csv(fname, index=False) got = cudf.read_csv(fname) assert_eq(df, got) def test_csv_reader_carriage_return(tmpdir): rows = 1000 names = ["int_row", "int_double_row"] buffer = ",".join(names) + "\r\n" for row in range(rows): buffer += str(row) + ", " + str(2 * row) + "\r\n" df = read_csv(StringIO(buffer)) expect = cudf.DataFrame( {"int_row": cp.arange(rows), "int_double_row": cp.arange(rows) * 2} ) assert len(df) == rows assert_eq(expect, df) def test_csv_reader_tabs(): names = ["float_point", "integer", "date"] lines = [ ",".join(names), "1.2,\t12, \t11/22/1995", "3.4\t,\t34\t,\t 01/01/2001", "\t 5.6,56 \t, 12/12/1970", "\t7.8 , 78\t,06/15/2018 \t", ] buffer = "\n".join(lines) df = read_csv(StringIO(buffer), parse_dates=["date"]) assert df.shape == (4, 3) floats = [1.2, 3.4, 5.6, 7.8] ints = [12, 34, 56, 78] dates = [ "1995-11-22T00:00:00.000000000", "2001-01-01T00:00:00.000000000", "1970-12-12T00:00:00.000000000", "2018-06-15T00:00:00.000000000", ] np.testing.assert_allclose(floats, df["float_point"].to_numpy()) np.testing.assert_allclose(ints, df["integer"].to_numpy()) for row in range(4): assert str(df["date"][row]) == dates[row] @pytest.mark.parametrize("segment_bytes", [10000, 19999, 30001, 36000]) def test_csv_reader_byte_range(tmpdir, segment_bytes): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file16.csv") names = ["int1", "int2"] rows = 10000 with open(str(fname), "w") as fp: for i in range(rows): fp.write(str(i) + ", " + str(2 * i) + " \n") file_size = os.stat(str(fname)).st_size ref_df = read_csv(str(fname), names=names).to_pandas() dfs = [] for segment in range((file_size + segment_bytes - 1) // segment_bytes): dfs.append( read_csv( str(fname), names=names, byte_range=(segment * segment_bytes, segment_bytes), ) ) df = cudf.concat(dfs).to_pandas() assert list(df["int1"]) == list(ref_df["int1"]) assert list(df["int2"]) == list(ref_df["int2"]) def 
test_csv_reader_byte_range_type_corner_case(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file17.csv") cudf.datasets.timeseries( start="2000-01-01", end="2000-01-02", dtypes={"name": str, "id": int, "x": float, "y": float}, ).to_csv(fname, chunksize=100000) byte_range = (2_147_483_648, 0) with pytest.raises(RuntimeError, match="Offset is past end of file"): cudf.read_csv(fname, byte_range=byte_range, header=None) @pytest.mark.parametrize("segment_bytes", [10, 19, 31, 36]) def test_csv_reader_byte_range_strings(segment_bytes): names = ["strings"] buffer = "\n".join('"' + str(x) + '"' for x in range(1, 100)) file_size = len(buffer) ref_df = read_csv(StringIO(buffer), names=names).to_pandas() dfs = [] for segment in range((file_size + segment_bytes - 1) // segment_bytes): dfs.append( read_csv( StringIO(buffer), names=names, byte_range=(segment * segment_bytes, segment_bytes), ) ) df = cudf.concat(dfs).to_pandas() assert list(df["strings"]) == list(ref_df["strings"]) @pytest.mark.parametrize( "header_row, skip_rows, skip_blanks", [ (1, 0, True), ("infer", 2, True), (1, 4, True), (3, 0, False), ("infer", 5, False), ], ) @pytest.mark.parametrize("lineterminator", ["\n", "\r\n"]) def test_csv_reader_blanks_and_comments( skip_rows, header_row, skip_blanks, lineterminator ): lines = [ "# first comment line", lineterminator, "# third comment line", "1,2,3", "4,5,6", "7,8,9", lineterminator, "# last comment line", lineterminator, "1,1,1", ] buffer = lineterminator.join(lines) cu_df = read_csv( StringIO(buffer), comment="#", header=header_row, skiprows=skip_rows, skip_blank_lines=skip_blanks, ) pd_df = pd.read_csv( StringIO(buffer), comment="#", header=header_row, skiprows=skip_rows, skip_blank_lines=skip_blanks, ) assert cu_df.shape == pd_df.shape assert list(cu_df.columns.values) == list(pd_df.columns.values) def test_csv_reader_prefix(): lines = ["1, 1, 1, 1"] buffer = "\n".join(lines) prefix_str = "a_prefix" df = read_csv(StringIO(buffer), header=None, prefix=prefix_str) column_names = list(df.columns.values) for col in range(len(column_names)): assert column_names[col] == prefix_str + str(col) def test_csv_reader_delim_whitespace(): buffer = "1 2 3\n4 5 6" # with header row cu_df = read_csv(StringIO(buffer), delim_whitespace=True) pd_df = pd.read_csv(StringIO(buffer), delim_whitespace=True) assert_eq(pd_df, cu_df) # without header row cu_df = read_csv(StringIO(buffer), delim_whitespace=True, header=None) pd_df = pd.read_csv(StringIO(buffer), delim_whitespace=True, header=None) assert pd_df.shape == cu_df.shape # should raise an error if used with delimiter or sep with pytest.raises(ValueError): read_csv(StringIO(buffer), delim_whitespace=True, delimiter=" ") with pytest.raises(ValueError): read_csv(StringIO(buffer), delim_whitespace=True, sep=" ") def test_csv_reader_unnamed_cols(): # first and last columns are unnamed buffer = ",1,2,3,\n4,5,6,7,8" cu_df = read_csv(StringIO(buffer)) pd_df = pd.read_csv(StringIO(buffer)) assert all(pd_df.columns == cu_df.columns) assert pd_df.shape == cu_df.shape def test_csv_reader_header_quotation(): buffer = '"1,,1","2,\n,2",3\n+4,+5,+6' cu_df = read_csv(StringIO(buffer)) pd_df = pd.read_csv(StringIO(buffer)) assert cu_df.shape == (1, 3) assert_eq(pd_df, cu_df) # test cases that fail with pandas buffer_pd_fail = '"1,one," , ",2,two" ,3\n4,5,6' cu_df = read_csv(StringIO(buffer_pd_fail)) assert cu_df.shape == (1, 3) def test_csv_reader_oversized_byte_range(): buffer = "a,b,c,d,e\n4,5,6,7,8" cu_df = read_csv(StringIO(buffer), byte_range=(0, 1024)) 
pd_df = pd.read_csv(StringIO(buffer)) assert all(pd_df.columns == cu_df.columns) assert pd_df.shape == cu_df.shape def test_csv_reader_index_col(): buffer = "0,1,2\n3,4,5\n6,7,8" names = ["int1", "int2", "int3"] # using a column name cu_df = read_csv(StringIO(buffer), names=names, index_col="int1") pd_df = pd.read_csv(StringIO(buffer), names=names, index_col="int1") assert_eq(pd_df, cu_df) # using a column index cu_df = read_csv(StringIO(buffer), header=None, index_col=0) pd_df = pd.read_csv(StringIO(buffer), header=None, index_col=0) assert_eq(cu_df.index, pd_df.index) # using a column index with names cu_df = read_csv(StringIO(buffer), header=None, index_col=0, names=names) pd_df = pd.read_csv( StringIO(buffer), header=None, index_col=0, names=names ) assert_eq(cu_df.index, pd_df.index) # passing False to avoid using a column as index (no-op in cuDF) cu_df = read_csv(StringIO(buffer), header=None, index_col=False) pd_df = pd.read_csv(StringIO(buffer), header=None, index_col=False) assert_eq(cu_df.index, pd_df.index) @pytest.mark.parametrize("index_name", [None, "custom name", 124]) @pytest.mark.parametrize("index_col", [None, 0, "a"]) def test_csv_reader_index_names(index_name, index_col): pdf = pd.DataFrame( {"a": [1, 2, 3], "b": [10, 11, 12]}, index=["AB", "CD", "EF"] ) pdf.index.name = index_name buffer = pdf.to_csv() actual = cudf.read_csv(StringIO(buffer), index_col=index_col) expected = pd.read_csv(StringIO(buffer), index_col=index_col) assert_eq(actual, expected) @pytest.mark.parametrize( "names", [["a", "b", "c"], [416, 905, 647], range(3), None] ) def test_csv_reader_column_names(names): buffer = "0,1,2\n3,4,5\n6,7,8" df = read_csv(StringIO(buffer), names=names) if names is None: assert list(df) == ["0", "1", "2"] else: assert list(df) == list(names) @pytest.mark.xfail( condition=PANDAS_LT_140, reason="https://github.com/rapidsai/cudf/issues/10618", ) def test_csv_reader_repeated_column_name(): buffer = """A,A,A.1,A,A.2,A,A.4,A,A 1,2,3.1,4,a.2,a,a.4,a,a 2,4,6.1,8,b.2,b,b.4,b,b""" # pandas and cudf to have same repeated column names pdf = pd.read_csv(StringIO(buffer)) gdf = cudf.read_csv(StringIO(buffer)) assert_eq(pdf.columns, gdf.columns) def test_csv_reader_bools_false_positives(tmpdir): # values that are equal to ["True", "TRUE", "False", "FALSE"] # when using ints to detect bool values items = [3977, 4329, 24015, 27567] buffer = "\n".join(str(i) for i in items) df = read_csv(StringIO(buffer), header=None, dtype=["int32"]) np.testing.assert_array_equal(items, df["0"].to_numpy()) def test_csv_reader_aligned_byte_range(tmpdir): fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file19.csv") nelem = 1000 input_df = pd.DataFrame( {"key": np.arange(0, nelem), "zeros": np.zeros(nelem)} ) input_df.to_csv(fname) df = cudf.read_csv(str(fname), byte_range=(0, 4096)) # read_csv call above used to crash; the assert below is not crucial assert np.count_nonzero(df["zeros"].to_pandas().values) == 0 @pytest.mark.parametrize( "pdf_dtype, gdf_dtype", [(None, None), ("int", "hex"), ("int32", "hex32"), ("int64", "hex64")], ) def test_csv_reader_hexadecimals(pdf_dtype, gdf_dtype): lines = ["0x0", "-0x1000", "0xfedcba", "0xABCDEF", "0xaBcDeF"] values = [int(hex_int, 16) for hex_int in lines] buffer = "\n".join(lines) if gdf_dtype is not None: # require explicit `hex` dtype to parse hexadecimals pdf = pd.DataFrame(data=values, dtype=pdf_dtype, columns=["hex_int"]) gdf = read_csv(StringIO(buffer), dtype=[gdf_dtype], names=["hex_int"]) np.testing.assert_array_equal( pdf["hex_int"], 
gdf["hex_int"].to_numpy() ) else: # otherwise, dtype inference returns as object (string) pdf = pd.read_csv(StringIO(buffer), names=["hex_int"]) gdf = read_csv(StringIO(buffer), names=["hex_int"]) assert_eq(pdf, gdf) @pytest.mark.parametrize( "np_dtype, gdf_dtype", [("int", "hex"), ("int32", "hex32"), ("int64", "hex64")], ) def test_csv_reader_hexadecimal_overflow(np_dtype, gdf_dtype): # This tests values which cause an overflow warning that will become an # error in pandas. NumPy wraps the overflow silently up to the bounds of a # signed int64. lines = [ "0x0", "-0x1000", "0xfedcba", "0xABCDEF", "0xaBcDeF", "0x9512c20b", "0x7fffffff", "0x7fffffffffffffff", "-0x8000000000000000", ] values = [int(hex_int, 16) for hex_int in lines] buffer = "\n".join(lines) gdf = read_csv(StringIO(buffer), dtype=[gdf_dtype], names=["hex_int"]) expected = np.array(values).astype(np_dtype) actual = gdf["hex_int"].to_numpy() np.testing.assert_array_equal(expected, actual) @pytest.mark.parametrize("quoting", [0, 1, 2, 3]) def test_csv_reader_pd_consistent_quotes(quoting): names = ["text"] dtypes = ["str"] lines = ['"a"', '"b ""c"" d"', '"f!\n."'] buffer = "\n".join(lines) gd_df = read_csv( StringIO(buffer), names=names, dtype=dtypes, quoting=quoting ) pd_df = pd.read_csv(StringIO(buffer), names=names, quoting=quoting) assert_eq(pd_df, gd_df) def test_read_csv_names_header_combination(): pdf = pd.DataFrame( { "firstname": ["Emma", "Ava", "Sophia"], "lastname": ["Olivia", "Isabella", "Charlotte"], "gender": ["F", "F", "F"], } ) buffer = pdf.to_csv(header=True, index=False) names = pdf.columns gdf = read_csv(StringIO(buffer), names=names, header=0) assert_eq(pdf, gdf) gdf = read_csv(StringIO(buffer), header=0) assert_eq(pdf, gdf) gdf = read_csv(StringIO(buffer)) assert_eq(pdf, gdf) def test_csv_reader_scientific_type_detection(): buffer = """1.,1.1,-1.1,1E1,1e1,-1e1,-1e-1,1e-1,1.1e1,1.1e-1,-1.1e-1,-1.1e1 +1.1,1E+1,1e+1,+1e1,+1e-1,1e-1,+1.1e1,1.1e+1,+1.1e+1,+1.1e1""" expected = [ 1.0, 1.1, -1.1, 10.0, 10.0, -10, -0.1, 0.1, 11, 0.11, -0.11, -11, 1.1, 10.0, 10.0, 10, 0.1, 0.1, 11, 11, 11, 11, ] df = read_csv(StringIO(buffer), header=None) for dt in df.dtypes: assert dt == "float64" for col in df: assert np.isclose(df[col][0], expected[int(col)]) @pytest.mark.parametrize("lineterminator", ["\n", "\r\n"]) def test_csv_blank_first_row(lineterminator): lines = ["colA,colB", "", "1, 1.1", "2, 2.2"] buffer = lineterminator.join(lines) cu_df = read_csv(StringIO(buffer)) assert cu_df.shape == (2, 2) assert all(cu_df.columns == ["colA", "colB"]) @pytest.mark.parametrize("contents", ["", "\n"]) def test_csv_empty_file(tmpdir, contents): fname = tmpdir.mkdir("gdf_csv").join("test_csv_empty_file.csv") with open(fname, "w") as f: f.write(contents) col_names = ["col1", "col2", "col3", "col4"] in_dtypes = ["int", "str", "float", "short"] out_dtypes = ["int64", "object", "float64", "int16"] # Empty dataframe if no columns names specified or inferred df = read_csv(str(fname)) assert len(df.columns) == 0 # No row dataframe if columns names are specified or inferred df = read_csv(str(fname), dtype=in_dtypes, names=col_names) assert all(df.columns == col_names) assert list(df.dtypes) == out_dtypes @pytest.mark.parametrize("contents", ["", "\n"]) def test_csv_empty_buffer(tmpdir, contents): col_names = ["col1", "col2", "col3", "col4"] in_dtypes = ["int", "str", "float", "short"] out_dtypes = ["int64", "object", "float64", "int16"] # Empty dataframe if no columns names specified or inferred df = read_csv(StringIO(contents)) assert 
len(df.columns) == 0 # No row dataframe if columns names are specified or inferred df = read_csv(StringIO(contents), dtype=in_dtypes, names=col_names) assert all(df.columns == col_names) assert list(df.dtypes) == out_dtypes @pytest.mark.parametrize( "dtype", [["short", "float", "int"], {"A": "short", "C": "int"}] ) def test_csv_reader_partial_dtype(dtype): names_df = read_csv( StringIO("0,1,2"), names=["A", "B", "C"], dtype=dtype, usecols=["A", "C"], ) header_df = read_csv( StringIO('"A","B","C"\n0,1,2'), dtype=dtype, usecols=["A", "C"] ) assert names_df == header_df assert all(names_df.dtypes == ["int16", "int64"]) def test_csv_writer_file_handle(tmpdir): df = pd.DataFrame({"a": [1, 2, 3], "b": ["xxx", "yyyy", "zzzzz"]}) gdf = cudf.from_pandas(df) gdf_df_fname = tmpdir.join("gdf_df_1.csv") with open(gdf_df_fname, "w") as f: gdf.to_csv(path_or_buf=f, index=False) assert os.path.exists(gdf_df_fname) gdf2 = pd.read_csv(gdf_df_fname) assert_eq(gdf, gdf2) def test_csv_writer_file_append(tmpdir): gdf1 = cudf.DataFrame({"a": [1, 2, 3], "b": ["xxx", "yyyy", "zzzzz"]}) gdf2 = cudf.DataFrame({"a": [4, 5, 6], "b": ["foo", "bar", "baz"]}) gdf_df_fname = tmpdir.join("gdf_df_append.csv") with open(gdf_df_fname, "w") as f: gdf1.to_csv(f, index=False) with open(gdf_df_fname, "a") as f: gdf2.to_csv(f, header=False, index=False) result = cudf.read_csv(gdf_df_fname) expected = cudf.concat([gdf1, gdf2], ignore_index=True) assert_eq(result, expected, check_index_type=True) def test_csv_writer_buffer(tmpdir): gdf = cudf.DataFrame({"a": [1, 2, 3], "b": ["xxx", "yyyy", "zzzzz"]}) buffer = BytesIO() gdf.to_csv(buffer, index=False) result = cudf.read_csv(buffer) assert_eq(result, gdf) @pytest.mark.parametrize("dtype", dtypes) @pytest.mark.parametrize("nelem", nelem) def test_csv_writer_numeric_data(dtype, nelem, tmpdir): pdf_df_fname = tmpdir.join("pdf_df_1.csv") gdf_df_fname = tmpdir.join("gdf_df_1.csv") df = make_numeric_dataframe(nelem, dtype) gdf = cudf.from_pandas(df) df.to_csv(path_or_buf=pdf_df_fname, index=False, lineterminator="\n") gdf.to_csv(path_or_buf=gdf_df_fname, index=False) assert os.path.exists(pdf_df_fname) assert os.path.exists(gdf_df_fname) expect = pd.read_csv(pdf_df_fname) got = pd.read_csv(gdf_df_fname) assert_eq(expect, got) def test_csv_writer_datetime_data(tmpdir): pdf_df_fname = tmpdir.join("pdf_df_2.csv") gdf_df_fname = tmpdir.join("gdf_df_2.csv") df = make_datetime_dataframe() gdf = cudf.from_pandas(df) df.to_csv(path_or_buf=pdf_df_fname, index=False, lineterminator="\n") gdf.to_csv(path_or_buf=gdf_df_fname, index=False) assert os.path.exists(pdf_df_fname) assert os.path.exists(gdf_df_fname) expect = pd.read_csv(pdf_df_fname) got = pd.read_csv(gdf_df_fname) assert_eq(expect, got) @pytest.mark.parametrize("lineterminator", ["\r", "\n", "\t", np.str_("\n")]) @pytest.mark.parametrize("sep", [",", "/", np.str_(",")]) def test_csv_writer_terminator_sep(lineterminator, sep, cudf_mixed_dataframe): df = cudf_mixed_dataframe buffer = BytesIO() df.to_csv(buffer, lineterminator=lineterminator, sep=sep, index=False) got = read_csv(buffer, lineterminator=lineterminator, sep=sep) assert_eq(df, got) @pytest.mark.parametrize( "lineterminator", ["\r\n", "ABC", "\t\t", np.str_("\r\n")] ) def test_csv_writer_multichar_terminator(lineterminator, cudf_mixed_dataframe): df = cudf_mixed_dataframe default_terminator_csv = StringIO() df.to_csv(default_terminator_csv) # Need to check manually since readers don't support # multicharacter line terminators expected = 
default_terminator_csv.getvalue().replace("\n", lineterminator) buffer = StringIO() df.to_csv(buffer, lineterminator=lineterminator) got = buffer.getvalue() assert_eq(expected, got) @pytest.mark.parametrize( "columns", [ ["Date", "Float"], ["Integer2", "Float", "Date", "Integer", "String", "Boolean"], None, ], ) @pytest.mark.parametrize( "header", [True, False, np.bool_(True), np.bool_(False)] ) @pytest.mark.parametrize( "index", [True, False, np.bool_(True), np.bool_(False)] ) def test_csv_writer_column_and_header_options( columns, header, index, pd_mixed_dataframe ): pdf = pd_mixed_dataframe df = cudf.from_pandas(pdf) cudf_buffer = BytesIO() df.to_csv(cudf_buffer, columns=columns, header=header, index=index) pd_buffer = BytesIO() pdf.to_csv(pd_buffer, columns=columns, header=header, index=index) expected = cudf.read_csv(pd_buffer, header=0 if header else None) got = cudf.read_csv(cudf_buffer, header=0 if header else None) expected_column_cnt = (1 if index else 0) + ( len(columns) if columns else pdf.shape[1] ) assert_eq(expected_column_cnt, got.shape[1]) assert_eq(expected, got) def test_csv_writer_empty_columns_parameter(cudf_mixed_dataframe): df = cudf_mixed_dataframe write_str = df.to_csv(columns=[], index=False) assert_eq(write_str, "\n") def test_csv_writer_multiindex(tmpdir): pdf_df_fname = tmpdir.join("pdf_df_3.csv") gdf_df_fname = tmpdir.join("gdf_df_3.csv") np.random.seed(0) gdf = cudf.DataFrame( { "a": np.random.randint(0, 5, 20), "b": np.random.randint(0, 5, 20), "c": range(20), "d": np.random.random(20), } ) gdg = gdf.groupby(["a", "b"]).mean() pdg = gdg.to_pandas() pdg.to_csv(pdf_df_fname) gdg.to_csv(gdf_df_fname) assert os.path.exists(pdf_df_fname) assert os.path.exists(gdf_df_fname) expect = pd.read_csv(pdf_df_fname) got = pd.read_csv(gdf_df_fname) assert_eq(expect, got) @pytest.mark.parametrize("chunksize", [None, 9, 1000]) @pytest.mark.parametrize("dtype", dtypes) def test_csv_writer_chunksize(chunksize, dtype): cu_df = cudf.from_pandas(make_numeric_dataframe(100, dtype)) buffer = BytesIO() cu_df.to_csv(buffer, chunksize=chunksize, index=False) got = cudf.read_csv(buffer, dtype=[dtype]) assert_eq(cu_df, got) @pytest.mark.parametrize( "df", [ cudf.DataFrame({"vals": [1, 2, 3]}), cudf.DataFrame( {"vals1": [1, 2, 3], "vals2": ["hello", "rapids", "cudf"]} ), cudf.DataFrame( {"vals1": [None, 2.0, 3.0], "vals2": ["hello", "rapids", None]} ), ], ) def test_to_csv_empty_filename(df): pdf = df.to_pandas() actual = df.to_csv() expected = pdf.to_csv() assert actual == expected @pytest.mark.parametrize( "df", [ cudf.DataFrame({"vals": [1, 2, 3]}), cudf.DataFrame( {"vals1": [1, 2, 3], "vals2": ["hello", "rapids", "cudf"]} ), cudf.DataFrame( {"vals1": [None, 2.0, 3.0], "vals2": ["hello", "rapids", None]} ), ], ) def test_to_csv_StringIO(df): cudf_io = StringIO() pandas_io = StringIO() pdf = df.to_pandas() df.to_csv(cudf_io) pdf.to_csv(pandas_io) cudf_io.seek(0) pandas_io.seek(0) assert cudf_io.read() == pandas_io.read() def test_csv_writer_empty_dataframe(tmpdir): df_fname = tmpdir.join("gdf_df_5.csv") gdf = cudf.DataFrame({"float_point": [], "integer": []}) gdf["float_point"] = gdf["float_point"].astype("float") gdf["integer"] = gdf["integer"].astype("int") gdf.to_csv(df_fname, index=False) df = cudf.read_csv(df_fname) assert df.shape == (0, 2) assert all(df.dtypes == ["object", "object"]) def test_csv_write_chunksize_corner_case(tmpdir): # With this num of rows and chunksize # libcudf splits table such a way that it # will end up creating an empty table slice # which caused the 
issue 5588. df_fname = tmpdir.join("gdf_df_17.csv") df = cudf.DataFrame({"a": np.arange(10_000)}) df.to_csv(df_fname, chunksize=1000, index=False) got = cudf.read_csv(df_fname) assert_eq(df, got) def test_csv_write_no_caller_manipulation(): df = cudf.DataFrame({"a": [1, 2, 3]}) df_copy = df.copy(deep=True) _ = df.to_csv(index=True) assert_eq(df, df_copy) @pytest.mark.parametrize( "df", [ cudf.DataFrame({"a": [1, 2, 3], "": [10, 20, 40]}), cudf.DataFrame({"": [10, 20, 40], "a": [1, 2, 3]}), cudf.DataFrame( {"a": [1, 2, 3], "": [10, 20, 40]}, index=cudf.Index(["a", "z", "v"], name="custom name"), ), ], ) @pytest.mark.parametrize("index", [True, False]) @pytest.mark.parametrize("columns", [["a"], [""], None]) def test_csv_write_empty_column_name(df, index, columns): pdf = df.to_pandas() expected = pdf.to_csv(index=index, columns=columns) actual = df.to_csv(index=index, columns=columns) assert expected == actual @pytest.mark.parametrize( "df", [ cudf.DataFrame(), cudf.DataFrame(index=cudf.Index([], name="index name")), ], ) @pytest.mark.parametrize("index", [True, False]) def test_csv_write_empty_dataframe(df, index): pdf = df.to_pandas() expected = pdf.to_csv(index=index) actual = df.to_csv(index=index) assert expected == actual @pytest.mark.parametrize( "df", [ pd.DataFrame( { "a": [1, 2, 3, None], "": ["a", "v", None, None], None: [12, 12, 32, 44], } ), pd.DataFrame( { np.nan: [1, 2, 3, None], "": ["a", "v", None, None], None: [12, 12, 32, 44], } ), pd.DataFrame({"": [1, None, 3, 4]}), pd.DataFrame({None: [1, None, 3, 4]}), pd.DataFrame(columns=[None, "", "a", "b"]), pd.DataFrame(columns=[None]), pd.DataFrame(columns=[""]), ], ) @pytest.mark.parametrize( "na_rep", ["", "_NA_", "---", "_____CUSTOM_NA_REP______"] ) def test_csv_write_dataframe_na_rep(df, na_rep): gdf = cudf.from_pandas(df) expected = df.to_csv(na_rep=na_rep) actual = gdf.to_csv(na_rep=na_rep) assert expected == actual @pytest.mark.parametrize( "dtype", [ "int", "str", "float", np.int32, np.dtype("float32"), {"a": "int32", "b": "float64", "c": "uint8"}, int, str, object, ], ) def test_csv_reader_dtypes(dtype): buf = "a,b,c\n1,10,111\n2,11,112\n3,12,113\n4,13,114\n" expected = pd.read_csv(StringIO(buf), dtype=dtype) actual = cudf.read_csv(StringIO(buf), dtype=dtype) assert_eq(expected, actual) @pytest.mark.parametrize( "dtype", ["Int64", "UInt32", {"a": "UInt64", "b": "Float64", "c": "Int32"}] ) def test_csv_reader_nullable_dtypes(dtype): buf = "a,b,c\n1,10,111\n2,11,112\n3,12,113\n4,13,114\n" expected = pd.read_csv(StringIO(buf), dtype=dtype) actual = cudf.read_csv(StringIO(buf), dtype=dtype) assert_eq(expected, actual.to_pandas(nullable=True)) @pytest.mark.parametrize( "dtype", sorted(list(cudf.utils.dtypes.TIMEDELTA_TYPES)) ) def test_csv_reader_timedetla_dtypes(dtype): buf = "a,b,c\n1,10,111\n2,11,112\n3,12,113\n43432423,13342,13243214\n" expected = pd.read_csv(StringIO(buf)).astype(dtype) actual = cudf.read_csv(StringIO(buf), dtype=dtype) assert_eq(expected, actual) @pytest.mark.parametrize( "dtype", sorted(list(cudf.utils.dtypes.DATETIME_TYPES)) ) def test_csv_reader_datetime_dtypes(dtype): buf = "a,b,c\n1,10,111\n2,11,112\n3,12,113\n43432423,13342,13243214\n" expected = pd.read_csv(StringIO(buf)).astype(dtype) actual = cudf.read_csv(StringIO(buf), dtype=dtype) assert_eq(expected, actual) @pytest.mark.parametrize( "df", [ cudf.DataFrame( { "a": cudf.Series([1, 2, 3, 1, 2], dtype="category"), "b": cudf.Series(["a", "c", "a", "b", "a"], dtype="category"), } ), cudf.DataFrame( { "a": cudf.Series([1.1, 2, 3, 1.1, 2], 
dtype="category"), "b": cudf.Series( [None, "c", None, "b", "a"], dtype="category" ), } ), cudf.DataFrame( { "b": cudf.Series( [1.1, 2, 3, 1.1, 2], dtype="category", index=cudf.CategoricalIndex( ["abc", "def", "ghi", "jkl", "xyz"] ), ) } ), ], ) def test_csv_writer_category(df): pdf = df.to_pandas() expected = pdf.to_csv() actual = df.to_csv() assert expected == actual @pytest.mark.parametrize( "dtype", [ "category", {"a": "category", "b": "str"}, {"b": "category"}, {"a": "category"}, {"a": pd.CategoricalDtype([1, 2])}, {"b": pd.CategoricalDtype([1, 2, 3])}, {"b": pd.CategoricalDtype(["b", "a"]), "a": "str"}, pd.CategoricalDtype(["a", "b"]), ], ) def test_csv_reader_category(dtype): df = cudf.DataFrame({"a": [1, 2, 3, None], "b": ["a", "b", None, "c"]}) csv_buf = df.to_csv() actual = cudf.read_csv(StringIO(csv_buf), dtype=dtype) expected = pd.read_csv(StringIO(csv_buf), dtype=dtype) assert_eq(expected, actual, check_dtype=True) def test_csv_writer_datetime_sep(): df = cudf.DataFrame( {"a": cudf.Series([22343, 2323423, 234324234], dtype="datetime64[ns]")} ) df["a"] = df["a"].astype("datetime64[s]") expected = df.to_pandas().to_csv(date_format="%Y-%m-%dT%H:%M:%SZ", sep="-") actual = df.to_csv(sep="-") assert expected == actual def test_na_filter_empty_fields(): test_na = "TEST_NAN" df = pd.DataFrame({"col0": ["valid", None, "also_valid", "", test_na]}) buffer = df.to_csv(index=False) pdf = pd.read_csv(StringIO(buffer), na_filter=False) gdf = cudf.read_csv(StringIO(buffer), na_filter=False) assert_eq(pdf, gdf) pdf = pd.read_csv(StringIO(buffer), keep_default_na=False) gdf = cudf.read_csv(StringIO(buffer), keep_default_na=False) assert_eq(pdf, gdf) pdf = pd.read_csv( StringIO(buffer), keep_default_na=False, na_values=test_na ) gdf = cudf.read_csv( StringIO(buffer), keep_default_na=False, na_values=test_na ) assert_eq(pdf, gdf) def test_csv_sep_error(): pdf = pd.DataFrame({"a": [1, 2, 3]}) gdf = cudf.DataFrame({"a": [1, 2, 3]}) assert_exceptions_equal( lfunc=pdf.to_csv, rfunc=gdf.to_csv, lfunc_args_and_kwargs=([], {"sep": "abc"}), rfunc_args_and_kwargs=([], {"sep": "abc"}), ) assert_exceptions_equal( lfunc=pdf.to_csv, rfunc=gdf.to_csv, lfunc_args_and_kwargs=([], {"sep": 1}), rfunc_args_and_kwargs=([], {"sep": 1}), ) def test_to_csv_encoding_error(): # TODO: Remove this test once following # issue is fixed: https://github.com/rapidsai/cudf/issues/2957 df = cudf.DataFrame({"a": ["你好", "test"]}) encoding = "utf-8-sig" error_message = ( f"Encoding {encoding} is not supported. " + "Currently, only utf-8 encoding is supported." 
) with pytest.raises(NotImplementedError, match=re.escape(error_message)): df.to_csv("test.csv", encoding=encoding) def test_to_csv_compression_error(): df = cudf.DataFrame({"a": ["test"]}) compression = "snappy" error_message = "Writing compressed csv is not currently supported in cudf" with pytest.raises(NotImplementedError, match=re.escape(error_message)): df.to_csv("test.csv", compression=compression) def test_empty_df_no_index(): actual = cudf.DataFrame({}) buffer = BytesIO() actual.to_csv(buffer, index=False) result = cudf.read_csv(buffer) assert_eq(actual, result) def test_default_integer_bitwidth( cudf_mixed_dataframe, default_integer_bitwidth ): # Test that integer columns in csv are _inferred_ as user specified # bitwidth buf = BytesIO() cudf_mixed_dataframe.to_csv(buf) buf.seek(0) read = cudf.read_csv(buf) assert read["Integer"].dtype == np.dtype(f"i{default_integer_bitwidth//8}") assert read["Integer2"].dtype == np.dtype( f"i{default_integer_bitwidth//8}" ) def test_default_integer_bitwidth_partial( cudf_mixed_dataframe, default_integer_bitwidth ): # Test that integer columns in csv are _inferred_ as user specified # bitwidth buf = BytesIO() cudf_mixed_dataframe.to_csv(buf) buf.seek(0) read = cudf.read_csv(buf, dtype={"Integer": "int64"}) assert read["Integer"].dtype == np.dtype("i8") assert read["Integer2"].dtype == np.dtype( f"i{default_integer_bitwidth//8}" ) @pytest.mark.filterwarnings("ignore:invalid value encountered in cast") def test_default_integer_bitwidth_extremes( cudf_extreme_numeric_dataframe, default_integer_bitwidth ): # Test that integer columns in csv are _inferred_ as user specified # bitwidth buf = BytesIO() cudf_extreme_numeric_dataframe.to_csv(buf) buf.seek(0) read = cudf.read_csv(buf) assert read["int64"].dtype == np.dtype(f"i{default_integer_bitwidth//8}") assert read["long"].dtype == np.dtype(f"i{default_integer_bitwidth//8}") assert read["uint64"].dtype == np.dtype(f"u{default_integer_bitwidth//8}") def test_default_float_bitwidth(cudf_mixed_dataframe, default_float_bitwidth): # Test that float columns in csv are _inferred_ as user specified # bitwidth buf = BytesIO() cudf_mixed_dataframe.to_csv(buf) buf.seek(0) read = cudf.read_csv(buf) assert read["Float"].dtype == np.dtype(f"f{default_float_bitwidth//8}") def test_default_float_bitwidth_partial(default_float_bitwidth): # Test that float columns in csv are _inferred_ as user specified # bitwidth read = cudf.read_csv( StringIO("float1,float2\n1.0,2.0\n3.0,4.0"), dtype={"float2": "float64"}, ) assert read["float1"].dtype == np.dtype(f"f{default_float_bitwidth//8}") assert read["float2"].dtype == np.dtype("f8") @pytest.mark.parametrize( "usecols,names", [ # selection using indices; only names of selected columns are specified ([1, 2], ["b", "c"]), # selection using indices; names of all columns are specified ([1, 2], ["a", "b", "c"]), # selection using indices; duplicates ([2, 2], ["a", "b", "c"]), # selection using indices; out of order ([2, 1], ["a", "b", "c"]), # selection using names (["b"], ["a", "b", "c"]), # selection using names; multiple columns (["b", "c"], ["a", "b", "c"]), # selection using names; duplicates (["c", "c"], ["a", "b", "c"]), # selection using names; out of order (["c", "b"], ["a", "b", "c"]), ], ) def test_column_selection_plus_column_names(usecols, names): lines = [ "num,datetime,text", "123,2018-11-13T12:00:00,abc", "456,2018-11-14T12:35:01,def", "789,2018-11-15T18:02:59,ghi", ] buffer = "\n".join(lines) + "\n" assert_eq( pd.read_csv(StringIO(buffer), usecols=usecols, 
        names=names,
    ),
    cudf.read_csv(StringIO(buffer), usecols=usecols, names=names),
)


def test_read_compressed_BOM(tmpdir):
    buffer = 'int, string\n1, "a"\n2, "b"\n3, "c"\n'
    fname = tmpdir.mkdir("gdf_csv").join("tmp_csvreader_file20.gz")
    with gzip.open(fname, "wt", encoding="utf-8") as f:
        f.write(codecs.BOM_UTF8.decode("utf-8"))
        f.write(buffer)

    assert_eq(pd.read_csv(fname), cudf.read_csv(fname))
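

# --- Illustrative sketch, not part of the original test file. ---
# The byte_range tests above read a CSV in fixed-size byte segments and
# stitch the pieces back together with cudf.concat. The helper below restates
# that pattern as a standalone function; its name and parameters are
# assumptions made for illustration only.
def _read_csv_in_segments(fname, names, segment_bytes):
    # Local imports keep the sketch self-contained.
    import os

    import cudf

    file_size = os.stat(fname).st_size
    # Ceiling division: enough segments to cover the whole file.
    n_segments = (file_size + segment_bytes - 1) // segment_bytes
    parts = [
        cudf.read_csv(
            fname,
            names=names,
            # byte_range is (offset, size); each call parses only the rows
            # that begin inside its window, so the segments do not overlap.
            byte_range=(seg * segment_bytes, segment_bytes),
        )
        for seg in range(n_segments)
    ]
    return cudf.concat(parts, ignore_index=True)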
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_array_function.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.testing._utils import assert_eq

# To determine if NEP18 is available in the current version of NumPy we simply
# attempt to concatenate an object with `__array_function__` defined and see if
# NumPy invokes the protocol or not. Taken from dask array
# https://github.com/dask/dask/blob/master/dask/array/utils.py#L352-L363
# TODO: Unclear if this is still necessary. NEP 18 was introduced as the
# default in 1.17 (https://github.com/numpy/numpy/releases/tag/v1.17.0) almost
# 3 years ago, and it was originally introduced one version before in 1.16
# (although not enabled by default then). Can we safely assume that testers
# will have a sufficiently new version of numpy to run these tests?
class _Test:
    def __array_function__(self, *args, **kwargs):
        return True


try:
    np.concatenate([_Test()])
except ValueError:
    missing_arrfunc_cond = True
else:
    missing_arrfunc_cond = False

del _Test

missing_arrfunc_reason = "NEP-18 support is not available in NumPy"


@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize("np_ar", [np.random.random(100)])
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.mean(x),
        lambda x: np.sum(x),
        lambda x: np.var(x, ddof=1),
        lambda x: np.unique(x),
        lambda x: np.dot(x, x),
        lambda x: np.linalg.norm(x),
    ],
)
def test_array_func_cudf_series(np_ar, func):
    cudf_ser = cudf.Series(np_ar)
    expect = func(np_ar)
    got = func(cudf_ser)
    if np.isscalar(expect):
        assert_eq(expect, got)
    else:
        assert_eq(expect, got.to_numpy())


@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize(
    "pd_df", [pd.DataFrame(np.random.uniform(size=(100, 10)))]
)
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.mean(x, axis=0),
        lambda x: np.sum(x, axis=0),
        lambda x: np.var(x, ddof=1),
        lambda x: np.dot(x, x.transpose()),
        lambda x: np.all(x),
        lambda x: np.any(x),
        lambda x: np.product(x),
        lambda x: np.product(x, axis=0),
        lambda x: np.product(x, axis=1),
    ],
)
def test_array_func_cudf_dataframe(pd_df, func):
    cudf_df = cudf.from_pandas(pd_df)
    expect = func(pd_df)
    got = func(cudf_df)
    assert_eq(expect, got)


@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize(
    "pd_df", [pd.DataFrame(np.random.uniform(size=(100, 10)))]
)
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.cov(x, x),
        lambda x: np.linalg.norm(x),
        lambda x: np.linalg.det(x),
    ],
)
def test_array_func_missing_cudf_dataframe(pd_df, func):
    cudf_df = cudf.from_pandas(pd_df)
    with pytest.raises(TypeError):
        func(cudf_df)


@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize("np_ar", [np.random.random(100)])
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.mean(x),
        lambda x: np.sum(x),
        lambda x: np.var(x, ddof=1),
        lambda x: np.unique(x),
        lambda x: np.dot(x, x),
    ],
)
def test_array_func_cudf_index(np_ar, func):
    cudf_index = cudf.core.index.as_index(cudf.Series(np_ar))
    expect = func(np_ar)
    got = func(cudf_index)
    if np.isscalar(expect):
        assert_eq(expect, got)
    else:
        assert_eq(expect, got.to_numpy())


@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize("np_ar", [np.random.random(100)])
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.cov(x, x),
        lambda x: np.linalg.norm(x),
        lambda x: np.linalg.det(x),
    ],
)
def test_array_func_missing_cudf_index(np_ar, func):
    cudf_index = cudf.core.index.as_index(cudf.Series(np_ar))
    with pytest.raises(TypeError):
        func(cudf_index)


@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
@pytest.mark.parametrize(
    "func",
    [
        lambda x: np.cov(x, x),
        lambda x: np.dot(x, x),
        lambda x: np.linalg.norm(x),
        lambda x: np.linalg.det(x),
    ],
)
def test_array_func_missing_cudf_multi_index(func):
    levels = [["a", "b"], ["c", "d"]]
    codes = [[0, 1], [1, 0]]

    cudf_multi_index = cudf.MultiIndex(levels, codes)
    with pytest.raises(TypeError):
        func(cudf_multi_index)


@pytest.mark.skipif(missing_arrfunc_cond, reason=missing_arrfunc_reason)
def test_list_input_array_func():
    ar = np.array([1, 2, 3])

    s = cudf.Series(ar)
    with pytest.raises(TypeError):
        np.concatenate([s, s, s])

    s = cudf.Series(ar, index=[1, 2, 3])
    with pytest.raises(TypeError):
        np.concatenate([s, s, s])
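

# --- Illustrative sketch, not part of the original test file. ---
# The tests above rely on NEP-18 dispatch: when an argument defines
# __array_function__, NumPy routes calls such as np.mean(obj) through that
# hook instead of coercing the object to an ndarray. The tiny class below
# shows the shape of the protocol; it is a hedged example for illustration,
# not cudf's implementation.
class _DelegatesToNumpy:
    def __init__(self, data):
        self._data = np.asarray(data)

    def __array_function__(self, func, types, args, kwargs):
        # Unwrap any wrapped arguments, then run the plain NumPy
        # implementation on the underlying ndarrays.
        unwrapped = tuple(
            a._data if isinstance(a, _DelegatesToNumpy) else a for a in args
        )
        return func(*unwrapped, **kwargs)


# Example: np.mean(_DelegatesToNumpy([1.0, 2.0, 3.0])) returns 2.0 via the
# hook above rather than via np.asarray coercion.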
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_indexing.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. from datetime import datetime from itertools import combinations import cupy import numpy as np import pandas as pd import pytest import cudf from cudf.testing import _utils as utils from cudf.testing._utils import ( INTEGER_TYPES, assert_eq, assert_exceptions_equal, ) index_dtypes = INTEGER_TYPES @pytest.fixture def pdf_gdf(): pdf = pd.DataFrame( {"a": [1, 2, 3], "b": ["c", "d", "e"]}, index=["one", "two", "three"] ) gdf = cudf.from_pandas(pdf) return pdf, gdf @pytest.fixture def pdf_gdf_multi(): pdf = pd.DataFrame(np.random.rand(7, 5)) pdfIndex = pd.MultiIndex( [ ["a", "b", "c"], ["house", "store", "forest"], ["clouds", "clear", "storm"], ["fire", "smoke", "clear"], ], [ [0, 0, 0, 0, 1, 1, 2], [1, 1, 1, 1, 0, 0, 2], [0, 0, 2, 2, 2, 0, 1], [0, 0, 0, 1, 2, 0, 1], ], ) pdfIndex.names = ["alpha", "location", "weather", "sign"] pdf.index = pdfIndex gdf = cudf.from_pandas(pdf) return pdf, gdf @pytest.mark.parametrize( "i1, i2, i3", ( [ (slice(None, 12), slice(3, None), slice(None, None, 2)), (range(12), range(3, 12), range(0, 9, 2)), (np.arange(12), np.arange(3, 12), np.arange(0, 9, 2)), (list(range(12)), list(range(3, 12)), list(range(0, 9, 2))), ( pd.Series(range(12)), pd.Series(range(3, 12)), pd.Series(range(0, 9, 2)), ), ( cudf.Series(range(12)), cudf.Series(range(3, 12)), cudf.Series(range(0, 9, 2)), ), ( [i in range(12) for i in range(20)], [i in range(3, 12) for i in range(12)], [i in range(0, 9, 2) for i in range(9)], ), ( np.array([i in range(12) for i in range(20)], dtype=bool), np.array([i in range(3, 12) for i in range(12)], dtype=bool), np.array([i in range(0, 9, 2) for i in range(9)], dtype=bool), ), ] + [ ( np.arange(12, dtype=t), np.arange(3, 12, dtype=t), np.arange(0, 9, 2, dtype=t), ) for t in index_dtypes ] ), ids=( [ "slice", "range", "numpy.array", "list", "pandas.Series", "Series", "list[bool]", "numpy.array[bool]", ] + ["numpy.array[%s]" % np.dtype(t).type.__name__ for t in index_dtypes] ), ) def test_series_indexing(i1, i2, i3): a1 = np.arange(20) series = cudf.Series(a1) # Indexing sr1 = series.iloc[i1] assert sr1.null_count == 0 np.testing.assert_equal(sr1.to_numpy(), a1[:12]) sr2 = sr1.iloc[i2] assert sr2.null_count == 0 np.testing.assert_equal(sr2.to_numpy(), a1[3:12]) # Index with stride sr3 = sr2.iloc[i3] assert sr3.null_count == 0 np.testing.assert_equal(sr3.to_numpy(), a1[3:12:2]) # Integer indexing if isinstance(i1, range): for i in i1: # Python int-s assert series[i] == a1[i] if isinstance(i1, np.ndarray) and i1.dtype in index_dtypes: for i in i1: # numpy integers assert series[i] == a1[i] @pytest.mark.parametrize( "arg", [ 1, -1, "b", np.int32(1), np.uint32(1), np.int8(1), np.uint8(1), np.int16(1), np.uint16(1), np.int64(1), np.uint64(1), ], ) def test_series_get_item_iloc_defer(arg): # Indexing for non-numeric dtype Index ps = pd.Series([1, 2, 3], index=pd.Index(["a", "b", "c"])) gs = cudf.from_pandas(ps) expect = ps[arg] got = gs[arg] assert_eq(expect, got) def test_series_iloc_defer_cudf_scalar(): ps = pd.Series([1, 2, 3], index=pd.Index(["a", "b", "c"])) gs = cudf.from_pandas(ps) for t in index_dtypes: arg = cudf.Scalar(1, dtype=t) got = gs[arg] expect = 2 assert_eq(expect, got) def test_series_indexing_large_size(): n_elem = 100_000 gsr = cudf.Series(cupy.ones(n_elem)) gsr[0] = None got = gsr[gsr.isna()] expect = cudf.Series([None], dtype="float64") assert_eq(expect, got) @pytest.mark.parametrize("psr", [pd.Series([1, 2, 3], index=["a", "b", "c"])]) @pytest.mark.parametrize( "arg", ["b", ["a", "c"], slice(1, 
2, 1), [True, False, True]] ) def test_series_get_item(psr, arg): gsr = cudf.from_pandas(psr) expect = psr[arg] got = gsr[arg] assert_eq(expect, got) def test_dataframe_column_name_indexing(): df = cudf.DataFrame() data = np.asarray(range(10), dtype=np.int32) df["a"] = data df[1] = data np.testing.assert_equal( df["a"].to_numpy(), np.asarray(range(10), dtype=np.int32) ) np.testing.assert_equal( df[1].to_numpy(), np.asarray(range(10), dtype=np.int32) ) pdf = pd.DataFrame() nelem = 10 pdf["key1"] = np.random.randint(0, 5, nelem) pdf["key2"] = np.random.randint(0, 3, nelem) pdf[1] = np.arange(1, 1 + nelem) pdf[2] = np.random.random(nelem) df = cudf.from_pandas(pdf) assert_eq(df[df.columns], df) assert_eq(df[df.columns[:1]], df[["key1"]]) for i in range(1, len(pdf.columns) + 1): for idx in combinations(pdf.columns, i): assert pdf[list(idx)].equals(df[list(idx)].to_pandas()) # test for only numeric columns df = pd.DataFrame() for i in range(0, 10): df[i] = range(nelem) gdf = cudf.DataFrame.from_pandas(df) assert_eq(gdf, df) assert_eq(gdf[gdf.columns], gdf) assert_eq(gdf[gdf.columns[:3]], gdf[[0, 1, 2]]) def test_dataframe_slicing(): df = cudf.DataFrame() size = 123 df["a"] = ha = np.random.randint(low=0, high=100, size=size).astype( np.int32 ) df["b"] = hb = np.random.random(size).astype(np.float32) df["c"] = hc = np.random.randint(low=0, high=100, size=size).astype( np.int64 ) df["d"] = hd = np.random.random(size).astype(np.float64) # Row slice first 10 first_10 = df[:10] assert len(first_10) == 10 assert tuple(first_10.columns) == ("a", "b", "c", "d") np.testing.assert_equal(first_10["a"].to_numpy(), ha[:10]) np.testing.assert_equal(first_10["b"].to_numpy(), hb[:10]) np.testing.assert_equal(first_10["c"].to_numpy(), hc[:10]) np.testing.assert_equal(first_10["d"].to_numpy(), hd[:10]) del first_10 # Row slice last 10 last_10 = df[-10:] assert len(last_10) == 10 assert tuple(last_10.columns) == ("a", "b", "c", "d") np.testing.assert_equal(last_10["a"].to_numpy(), ha[-10:]) np.testing.assert_equal(last_10["b"].to_numpy(), hb[-10:]) np.testing.assert_equal(last_10["c"].to_numpy(), hc[-10:]) np.testing.assert_equal(last_10["d"].to_numpy(), hd[-10:]) del last_10 # Row slice [begin:end] begin = 7 end = 121 subrange = df[begin:end] assert len(subrange) == end - begin assert tuple(subrange.columns) == ("a", "b", "c", "d") np.testing.assert_equal(subrange["a"].to_numpy(), ha[begin:end]) np.testing.assert_equal(subrange["b"].to_numpy(), hb[begin:end]) np.testing.assert_equal(subrange["c"].to_numpy(), hc[begin:end]) np.testing.assert_equal(subrange["d"].to_numpy(), hd[begin:end]) del subrange @pytest.mark.parametrize("step", [1, 2, 5]) @pytest.mark.parametrize("scalar", [0, 20, 100]) def test_dataframe_loc(scalar, step): size = 123 pdf = pd.DataFrame( { "a": np.random.randint(low=0, high=100, size=size), "b": np.random.random(size).astype(np.float32), "c": np.random.random(size).astype(np.float64), "d": np.random.random(size).astype(np.float64), } ) pdf.index.name = "index" df = cudf.DataFrame.from_pandas(pdf) assert_eq(df.loc[:, ["a"]], pdf.loc[:, ["a"]]) assert_eq(df.loc[:, "d"], pdf.loc[:, "d"]) # Scalar label assert_eq(df.loc[scalar], pdf.loc[scalar]) # Full slice assert_eq(df.loc[:, "c"], pdf.loc[:, "c"]) # Repeat with at[] assert_eq(df.loc[:, ["a"]], df.at[:, ["a"]]) assert_eq(df.loc[:, "d"], df.at[:, "d"]) assert_eq(df.loc[scalar], df.at[scalar]) assert_eq(df.loc[:, "c"], df.at[:, "c"]) begin = 110 end = 122 assert_eq( df.loc[begin:end:step, ["c", "d", "a"]], pdf.loc[begin:end:step, ["c", "d", 
"a"]], ) assert_eq(df.loc[begin:end, ["c", "d"]], pdf.loc[begin:end, ["c", "d"]]) # Slicing on columns: assert_eq( df.loc[begin:end:step, "a":"c"], pdf.loc[begin:end:step, "a":"c"] ) # Slicing of size 1: assert_eq(df.loc[begin:begin, "a"], pdf.loc[begin:begin, "a"]) # TODO: Pandas changes the dtype here when it shouldn't assert_eq( df.loc[begin, "a":"a"], pdf.loc[begin, "a":"a"], check_dtype=False ) # Repeat with at[] assert_eq( df.loc[begin:end:step, ["c", "d", "a"]], df.at[begin:end:step, ["c", "d", "a"]], ) assert_eq(df.loc[begin:end, ["c", "d"]], df.at[begin:end, ["c", "d"]]) assert_eq(df.loc[begin:end:step, "a":"c"], df.at[begin:end:step, "a":"c"]) assert_eq(df.loc[begin:begin, "a"], df.at[begin:begin, "a"]) assert_eq(df.loc[begin, "a":"a"], df.at[begin, "a":"a"], check_dtype=False) # Make int64 index offset = 50 df2 = df[offset:] pdf2 = pdf[offset:] begin = 117 end = 122 assert_eq( df2.loc[begin:end, ["c", "d", "a"]], pdf2.loc[begin:end, ["c", "d", "a"]], ) # loc with list like indexing assert_eq(df.loc[[0]], pdf.loc[[0]]) # loc with column like indexing assert_eq(df.loc[cudf.Series([0])], pdf.loc[pd.Series([0])]) assert_eq(df.loc[cudf.Series([0])._column], pdf.loc[pd.Series([0])]) assert_eq(df.loc[np.array([0])], pdf.loc[np.array([0])]) def test_dataframe_loc_duplicate_index_scalar(): pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5]}, index=[1, 2, 1, 4, 2]) gdf = cudf.DataFrame.from_pandas(pdf) pdf_sorted = pdf.sort_values(by=list(pdf.columns), axis=0) gdf_sorted = gdf.sort_values(by=list(gdf.columns), axis=0) assert_eq(pdf_sorted, gdf_sorted) @pytest.mark.parametrize( "mask", [[True, False, False, False, False], [True, False, True, False, True]], ) @pytest.mark.parametrize("arg", ["a", slice("a", "a"), slice("a", "b")]) def test_dataframe_loc_mask(mask, arg): pdf = pd.DataFrame( {"a": ["a", "b", "c", "d", "e"], "b": ["f", "g", "h", "i", "j"]} ) gdf = cudf.DataFrame.from_pandas(pdf) assert_eq(pdf.loc[mask, arg], gdf.loc[mask, arg]) def test_dataframe_loc_outbound(): df = cudf.DataFrame() size = 10 df["a"] = ha = np.random.randint(low=0, high=100, size=size).astype( np.int32 ) df["b"] = hb = np.random.random(size).astype(np.float32) pdf = pd.DataFrame() pdf["a"] = ha pdf["b"] = hb assert_exceptions_equal(lambda: pdf.loc[11], lambda: df.loc[11]) def test_series_loc_numerical(): ps = pd.Series([1, 2, 3, 4, 5], index=[5, 6, 7, 8, 9]) gs = cudf.Series.from_pandas(ps) assert_eq(ps.loc[5], gs.loc[5]) assert_eq(ps.loc[6], gs.loc[6]) assert_eq(ps.loc[6:8], gs.loc[6:8]) assert_eq(ps.loc[:8], gs.loc[:8]) assert_eq(ps.loc[6:], gs.loc[6:]) assert_eq(ps.loc[::2], gs.loc[::2]) assert_eq(ps.loc[[5, 8, 9]], gs.loc[[5, 8, 9]]) assert_eq( ps.loc[[True, False, True, False, True]], gs.loc[[True, False, True, False, True]], ) assert_eq(ps.loc[[5, 8, 9]], gs.loc[cupy.array([5, 8, 9])]) def test_series_loc_float_index(): ps = pd.Series([1, 2, 3, 4, 5], index=[5.43, 6.34, 7.34, 8.0, 9.1]) gs = cudf.Series.from_pandas(ps) assert_eq(ps.loc[5.43], gs.loc[5.43]) assert_eq(ps.loc[8], gs.loc[8]) assert_eq(ps.loc[6.1:8], gs.loc[6.1:8]) assert_eq(ps.loc[:7.1], gs.loc[:7.1]) assert_eq(ps.loc[6.345:], gs.loc[6.345:]) assert_eq(ps.loc[::2], gs.loc[::2]) assert_eq( ps.loc[[True, False, True, False, True]], gs.loc[[True, False, True, False, True]], ) def test_series_loc_string(): ps = pd.Series( [1, 2, 3, 4, 5], index=["one", "two", "three", "four", "five"] ) gs = cudf.Series.from_pandas(ps) assert_eq(ps.loc["one"], gs.loc["one"]) assert_eq(ps.loc["five"], gs.loc["five"]) assert_eq(ps.loc["two":"four"], gs.loc["two":"four"]) 
assert_eq(ps.loc[:"four"], gs.loc[:"four"]) assert_eq(ps.loc["two":], gs.loc["two":]) assert_eq(ps.loc[::2], gs.loc[::2]) assert_eq(ps.loc[["one", "four", "five"]], gs.loc[["one", "four", "five"]]) assert_eq( ps.loc[[True, False, True, False, True]], gs.loc[[True, False, True, False, True]], ) def test_series_loc_datetime(): ps = pd.Series( [1, 2, 3, 4, 5], index=pd.date_range("20010101", "20010105") ) gs = cudf.Series.from_pandas(ps) # a few different ways of specifying a datetime label: assert_eq(ps.loc["20010101"], gs.loc["20010101"]) assert_eq(ps.loc["2001-01-01"], gs.loc["2001-01-01"]) assert_eq( ps.loc[pd.to_datetime("2001-01-01")], gs.loc[pd.to_datetime("2001-01-01")], ) assert_eq( ps.loc[np.datetime64("2001-01-01")], gs.loc[np.datetime64("2001-01-01")], ) assert_eq( ps.loc["2001-01-02":"2001-01-05"], gs.loc["2001-01-02":"2001-01-05"], check_freq=False, ) assert_eq(ps.loc["2001-01-02":], gs.loc["2001-01-02":], check_freq=False) assert_eq(ps.loc[:"2001-01-04"], gs.loc[:"2001-01-04"], check_freq=False) assert_eq(ps.loc[::2], gs.loc[::2], check_freq=False) assert_eq( ps.loc[["2001-01-01", "2001-01-04", "2001-01-05"]], gs.loc[["2001-01-01", "2001-01-04", "2001-01-05"]], ) assert_eq( ps.loc[ [ pd.to_datetime("2001-01-01"), pd.to_datetime("2001-01-04"), pd.to_datetime("2001-01-05"), ] ], gs.loc[ [ pd.to_datetime("2001-01-01"), pd.to_datetime("2001-01-04"), pd.to_datetime("2001-01-05"), ] ], ) assert_eq( ps.loc[[True, False, True, False, True]], gs.loc[[True, False, True, False, True]], check_freq=False, ) just_less_than_max = ps.index.max() - pd.Timedelta("5m") assert_eq( ps.loc[:just_less_than_max], gs.loc[:just_less_than_max], check_freq=False, ) def test_series_loc_categorical(): ps = pd.Series( [1, 2, 3, 4, 5], index=pd.Categorical(["a", "b", "c", "d", "e"]) ) gs = cudf.Series.from_pandas(ps) assert_eq(ps.loc["a"], gs.loc["a"]) assert_eq(ps.loc["e"], gs.loc["e"]) assert_eq(ps.loc["b":"d"], gs.loc["b":"d"]) assert_eq(ps.loc[:"d"], gs.loc[:"d"]) assert_eq(ps.loc["b":], gs.loc["b":]) assert_eq(ps.loc[::2], gs.loc[::2]) # order of categories changes, so we can only # compare values: assert_eq( ps.loc[["a", "d", "e"]].values, gs.loc[["a", "d", "e"]].to_numpy() ) assert_eq( ps.loc[[True, False, True, False, True]], gs.loc[[True, False, True, False, True]], ) @pytest.mark.parametrize( "obj", [ pd.DataFrame( {"a": [1, 2, 3, 4]}, index=pd.MultiIndex.from_frame( pd.DataFrame( {"A": [2, 3, 1, 4], "B": ["low", "high", "high", "low"]} ) ), ), pd.Series( [1, 2, 3, 4], index=pd.MultiIndex.from_frame( pd.DataFrame( {"A": [2, 3, 1, 4], "B": ["low", "high", "high", "low"]} ) ), ), ], ) def test_dataframe_series_loc_multiindex(obj): pindex = pd.MultiIndex.from_frame( pd.DataFrame({"A": [3, 2], "B": ["high", "low"]}) ) gobj = cudf.from_pandas(obj) gindex = cudf.MultiIndex.from_pandas(pindex) # cudf MultiIndex as arg expected = obj.loc[pindex] got = gobj.loc[gindex] assert_eq(expected, got) # pandas MultiIndex as arg expected = obj.loc[pindex] got = gobj.loc[pindex] assert_eq(expected, got) @pytest.mark.parametrize("nelem", [2, 5, 20, 100]) def test_series_iloc(nelem): # create random cudf.Series np.random.seed(12) ps = pd.Series(np.random.sample(nelem)) # gpu cudf.Series gs = cudf.Series(ps) # positive tests for indexing np.testing.assert_allclose(gs.iloc[-1 * nelem], ps.iloc[-1 * nelem]) np.testing.assert_allclose(gs.iloc[-1], ps.iloc[-1]) np.testing.assert_allclose(gs.iloc[0], ps.iloc[0]) np.testing.assert_allclose(gs.iloc[1], ps.iloc[1]) np.testing.assert_allclose(gs.iloc[nelem - 1], ps.iloc[nelem - 
1]) # positive tests for slice np.testing.assert_allclose(gs.iloc[-1:1].to_numpy(), ps.iloc[-1:1]) np.testing.assert_allclose( gs.iloc[nelem - 1 : -1].to_numpy(), ps.iloc[nelem - 1 : -1] ) np.testing.assert_allclose( gs.iloc[0 : nelem - 1].to_pandas(), ps.iloc[0 : nelem - 1] ) np.testing.assert_allclose(gs.iloc[0:nelem].to_pandas(), ps.iloc[0:nelem]) np.testing.assert_allclose(gs.iloc[1:1].to_pandas(), ps.iloc[1:1]) np.testing.assert_allclose(gs.iloc[1:2].to_pandas(), ps.iloc[1:2].values) np.testing.assert_allclose( gs.iloc[nelem - 1 : nelem + 1].to_pandas(), ps.iloc[nelem - 1 : nelem + 1], ) np.testing.assert_allclose( gs.iloc[nelem : nelem * 2].to_pandas(), ps.iloc[nelem : nelem * 2] ) @pytest.mark.parametrize("nelem", [2, 5, 20, 100]) def test_dataframe_iloc(nelem): gdf = cudf.DataFrame() gdf["a"] = ha = np.random.randint(low=0, high=100, size=nelem).astype( np.int32 ) gdf["b"] = hb = np.random.random(nelem).astype(np.float32) pdf = pd.DataFrame() pdf["a"] = ha pdf["b"] = hb gdf.index.name = "index" pdf.index.name = "index" assert_eq(gdf.iloc[-1:1], pdf.iloc[-1:1]) assert_eq(gdf.iloc[nelem - 1 : -1], pdf.iloc[nelem - 1 : -1]) assert_eq(gdf.iloc[0 : nelem - 1], pdf.iloc[0 : nelem - 1]) assert_eq(gdf.iloc[0:nelem], pdf.iloc[0:nelem]) assert_eq(gdf.iloc[1:1], pdf.iloc[1:1]) assert_eq(gdf.iloc[1:2], pdf.iloc[1:2]) assert_eq(gdf.iloc[nelem - 1 : nelem + 1], pdf.iloc[nelem - 1 : nelem + 1]) assert_eq(gdf.iloc[nelem : nelem * 2], pdf.iloc[nelem : nelem * 2]) assert_eq(gdf.iloc[-1 * nelem], pdf.iloc[-1 * nelem]) assert_eq(gdf.iloc[-1], pdf.iloc[-1]) assert_eq(gdf.iloc[0], pdf.iloc[0]) assert_eq(gdf.iloc[1], pdf.iloc[1]) assert_eq(gdf.iloc[nelem - 1], pdf.iloc[nelem - 1]) # Repeat the above with iat[] assert_eq(gdf.iloc[-1:1], gdf.iat[-1:1]) assert_eq(gdf.iloc[nelem - 1 : -1], gdf.iat[nelem - 1 : -1]) assert_eq(gdf.iloc[0 : nelem - 1], gdf.iat[0 : nelem - 1]) assert_eq(gdf.iloc[0:nelem], gdf.iat[0:nelem]) assert_eq(gdf.iloc[1:1], gdf.iat[1:1]) assert_eq(gdf.iloc[1:2], gdf.iat[1:2]) assert_eq(gdf.iloc[nelem - 1 : nelem + 1], gdf.iat[nelem - 1 : nelem + 1]) assert_eq(gdf.iloc[nelem : nelem * 2], gdf.iat[nelem : nelem * 2]) assert_eq(gdf.iloc[-1 * nelem], gdf.iat[-1 * nelem]) assert_eq(gdf.iloc[-1], gdf.iat[-1]) assert_eq(gdf.iloc[0], gdf.iat[0]) assert_eq(gdf.iloc[1], gdf.iat[1]) assert_eq(gdf.iloc[nelem - 1], gdf.iat[nelem - 1]) # iloc with list like indexing assert_eq(gdf.iloc[[0]], pdf.iloc[[0]]) # iloc with column like indexing assert_eq(gdf.iloc[cudf.Series([0])], pdf.iloc[pd.Series([0])]) assert_eq(gdf.iloc[cudf.Series([0])._column], pdf.iloc[pd.Series([0])]) assert_eq(gdf.iloc[np.array([0])], pdf.loc[np.array([0])]) def test_dataframe_iloc_tuple(): gdf = cudf.DataFrame() nelem = 123 gdf["a"] = ha = np.random.randint(low=0, high=100, size=nelem).astype( np.int32 ) gdf["b"] = hb = np.random.random(nelem).astype(np.float32) pdf = pd.DataFrame() pdf["a"] = ha pdf["b"] = hb assert_eq(gdf.iloc[1, [1]], pdf.iloc[1, [1]], check_dtype=False) assert_eq(gdf.iloc[:, -1], pdf.iloc[:, -1]) def test_dataframe_iloc_index_error(): gdf = cudf.DataFrame() nelem = 123 gdf["a"] = ha = np.random.randint(low=0, high=100, size=nelem).astype( np.int32 ) gdf["b"] = hb = np.random.random(nelem).astype(np.float32) pdf = pd.DataFrame() pdf["a"] = ha pdf["b"] = hb with pytest.raises(IndexError): pdf.iloc[nelem * 2] with pytest.raises(IndexError): gdf.iloc[nelem * 2] @pytest.mark.parametrize("ntake", [0, 1, 10, 123, 122, 200]) def test_dataframe_take(ntake): np.random.seed(0) df = cudf.DataFrame() nelem = 123 df["ii"] = 
np.random.randint(0, 20, nelem) df["ff"] = np.random.random(nelem) take_indices = np.random.randint(0, len(df), ntake) actual = df.take(take_indices) expected = df.to_pandas().take(take_indices) assert actual.ii.null_count == 0 assert actual.ff.null_count == 0 assert_eq(actual, expected) @pytest.mark.parametrize("ntake", [1, 2, 8, 9]) def test_dataframe_take_with_multiindex(ntake): np.random.seed(0) df = cudf.DataFrame( index=cudf.MultiIndex( levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]], codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) ) nelem = 9 df["ii"] = np.random.randint(0, 20, nelem) df["ff"] = np.random.random(nelem) take_indices = np.random.randint(0, len(df), ntake) actual = df.take(take_indices) expected = df.to_pandas().take(take_indices) assert_eq(actual, expected) @pytest.mark.parametrize("ntake", [0, 1, 10, 123, 122, 200]) def test_series_take(ntake): np.random.seed(0) nelem = 123 psr = pd.Series(np.random.randint(0, 20, nelem)) gsr = cudf.Series(psr) take_indices = np.random.randint(0, len(gsr), ntake) actual = gsr.take(take_indices) expected = psr.take(take_indices) assert_eq(actual, expected) def test_series_take_positional(): psr = pd.Series([1, 2, 3, 4, 5], index=["a", "b", "c", "d", "e"]) gsr = cudf.Series.from_pandas(psr) take_indices = [1, 2, 0, 3] expect = psr.take(take_indices) got = gsr.take(take_indices) assert_eq(expect, got) @pytest.mark.parametrize("nelem", [0, 1, 5, 20, 100]) @pytest.mark.parametrize("slice_start", [None, 0, 1, 3, 10, -10]) @pytest.mark.parametrize("slice_end", [None, 0, 1, 30, 50, -1]) def test_dataframe_masked_slicing(nelem, slice_start, slice_end): gdf = cudf.DataFrame() gdf["a"] = list(range(nelem)) gdf["b"] = list(range(nelem, 2 * nelem)) gdf["a"] = gdf["a"]._column.set_mask(utils.random_bitmask(nelem)) gdf["b"] = gdf["b"]._column.set_mask(utils.random_bitmask(nelem)) def do_slice(x): return x[slice_start:slice_end] expect = do_slice(gdf.to_pandas()) got = do_slice(gdf).to_pandas() assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize("dtype", [int, float, str]) def test_empty_boolean_mask(dtype): gdf = cudf.datasets.randomdata(nrows=0, dtypes={"a": dtype}) pdf = gdf.to_pandas() compare_val = dtype(1) expected = pdf[pdf.a == compare_val] got = gdf[gdf.a == compare_val] assert_eq(expected, got) expected = pdf.a[pdf.a == compare_val] got = gdf.a[gdf.a == compare_val] assert_eq(expected, got) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4], [1.0, 2.0, 3.0, 4.0], ["one", "two", "three", "four"], pd.Series(["a", "b", "c", "d"], dtype="category"), pd.Series(pd.date_range("2010-01-01", "2010-01-04")), ], ) @pytest.mark.parametrize( "mask", [ [True, True, True, True], [False, False, False, False], [True, False, True, False], [True, False, False, True], np.array([True, False, True, False]), pd.Series([True, False, True, False]), cudf.Series([True, False, True, False]), ], ) @pytest.mark.parametrize("nulls", ["one", "some", "all", "none"]) def test_series_apply_boolean_mask(data, mask, nulls): psr = pd.Series(data) if len(data) > 0: if nulls == "one": p = np.random.randint(0, 4) psr[p] = None elif nulls == "some": p1, p2 = np.random.randint(0, 4, (2,)) psr[p1] = None psr[p2] = None elif nulls == "all": psr[:] = None gsr = cudf.from_pandas(psr) # TODO: from_pandas(psr) has dtype "float64" # when psr has dtype "object" and is all None if psr.dtype == "object" and nulls == "all": gsr = cudf.Series([None, None, None, None], dtype="object") if isinstance(mask, cudf.Series): expect = 
psr[mask.to_pandas()] else: expect = psr[mask] got = gsr[mask] assert_eq(expect, got) def test_dataframe_apply_boolean_mask(): pdf = pd.DataFrame( { "a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3], "c": ["a", None, "b", "c"], } ) gdf = cudf.DataFrame.from_pandas(pdf) assert_eq(pdf[[True, False, True, False]], gdf[[True, False, True, False]]) """ This test compares cudf and Pandas DataFrame boolean indexing. """ @pytest.mark.parametrize( "mask_fn", [lambda x: x, lambda x: np.array(x), lambda x: pd.Series(x)] ) def test_dataframe_boolean_mask(mask_fn): mask_base = [ True, False, True, False, True, False, True, False, True, False, ] pdf = pd.DataFrame({"x": range(10), "y": range(10)}) gdf = cudf.from_pandas(pdf) mask = mask_fn(mask_base) assert len(mask) == gdf.shape[0] pdf_masked = pdf[mask] gdf_masked = gdf[mask] assert pdf_masked.to_string().split() == gdf_masked.to_string().split() @pytest.mark.parametrize( "key, value", [ (0, 4), (1, 4), ([0, 1], 4), ([0, 1], [4, 5]), (slice(0, 2), [4, 5]), (slice(1, None), [4, 5, 6, 7]), ([], 1), ([], []), (slice(None, None), 1), (slice(-1, -3), 7), ], ) @pytest.mark.parametrize("nulls", ["none", "some", "all"]) def test_series_setitem_basics(key, value, nulls): psr = pd.Series([1, 2, 3, 4, 5]) if nulls == "some": psr[[0, 4]] = None elif nulls == "all": psr[:] = None gsr = cudf.from_pandas(psr) psr[key] = value gsr[key] = value assert_eq(psr, gsr, check_dtype=False) def test_series_setitem_null(): gsr = cudf.Series([1, 2, 3, 4]) gsr[0] = None expect = cudf.Series([None, 2, 3, 4]) got = gsr assert_eq(expect, got) gsr = cudf.Series([None, 2, 3, 4]) gsr[0] = 1 expect = cudf.Series([1, 2, 3, 4]) got = gsr assert_eq(expect, got) @pytest.mark.parametrize( "key, value", [ (0, 4), (1, 4), ([0, 1], 4), ([0, 1], [4, 5]), (slice(0, 2), [4, 5]), (slice(1, None), [4, 5, 6, 7]), ([], 1), ([], []), (slice(None, None), 1), (slice(-1, -3), 7), ], ) @pytest.mark.parametrize("nulls", ["none", "some", "all"]) def test_series_setitem_iloc(key, value, nulls): psr = pd.Series([1, 2, 3, 4, 5]) if nulls == "some": psr[[0, 4]] = None elif nulls == "all": psr[:] = None gsr = cudf.from_pandas(psr) psr.iloc[key] = value gsr.iloc[key] = value assert_eq(psr, gsr, check_dtype=False) @pytest.mark.parametrize( "key, value", [ pytest.param( 0, 0.5, ), ([0, 1], 0.5), ([0, 1], [0.5, 2.5]), (slice(0, 2), [0.5, 0.25]), ], ) def test_series_setitem_dtype(key, value): psr = pd.Series([1, 2, 3], dtype="int32") gsr = cudf.from_pandas(psr) psr[key] = value gsr[key] = value assert_eq(psr, gsr) def test_series_setitem_datetime(): psr = pd.Series(["2001", "2002", "2003"], dtype="datetime64[ns]") gsr = cudf.from_pandas(psr) psr[0] = np.datetime64("2005") gsr[0] = np.datetime64("2005") assert_eq(psr, gsr) def test_series_setitem_datetime_coerced(): psr = pd.Series(["2001", "2002", "2003"], dtype="datetime64[ns]") gsr = cudf.from_pandas(psr) psr[0] = "2005" gsr[0] = "2005" assert_eq(psr, gsr) def test_series_setitem_categorical(): psr = pd.Series(["a", "b", "a", "c", "d"], dtype="category") gsr = cudf.from_pandas(psr) psr[0] = "d" gsr[0] = "d" assert_eq(psr, gsr) psr = psr.cat.add_categories(["e"]) gsr = gsr.cat.add_categories(["e"]) psr[0] = "e" gsr[0] = "e" assert_eq(psr, gsr) psr[[0, 1]] = "b" gsr[[0, 1]] = "b" assert_eq(psr, gsr) psr[0:3] = "e" gsr[0:3] = "e" assert_eq(psr, gsr) @pytest.mark.parametrize( "key, value", [ (0, "d"), (0, "g"), ([0, 1], "g"), ([0, 1], None), (slice(None, 2), "g"), (slice(None, 2), ["g", None]), ], ) def test_series_setitem_string(key, value): psr = pd.Series(["a", "b", 
"c", "d", "e"]) gsr = cudf.from_pandas(psr) psr[key] = value gsr[key] = value assert_eq(psr, gsr) psr = pd.Series(["a", None, "c", "d", "e"]) gsr = cudf.from_pandas(psr) psr[key] = value gsr[key] = value assert_eq(psr, gsr) @pytest.mark.parametrize( "key, value", [ ("a", 4), ("b", 4), ("b", np.int8(8)), ("d", 4), ("d", np.int8(16)), ("d", np.float32(16)), (["a", "b"], 4), (["a", "b"], [4, 5]), ([True, False, True], 4), ([False, False, False], 4), ([True, False, True], [4, 5]), ], ) def test_series_setitem_loc(key, value): psr = pd.Series([1, 2, 3], ["a", "b", "c"]) gsr = cudf.from_pandas(psr) psr.loc[key] = value gsr.loc[key] = value assert_eq(psr, gsr) @pytest.mark.parametrize( "key, value", [ (1, "d"), (2, "e"), (4, "f"), ([1, 3], "g"), ([1, 3], ["g", "h"]), ([True, False, True], "i"), ([False, False, False], "j"), ([True, False, True], ["k", "l"]), ], ) def test_series_setitem_loc_numeric_index(key, value): psr = pd.Series(["a", "b", "c"], [1, 2, 3]) gsr = cudf.from_pandas(psr) psr.loc[key] = value gsr.loc[key] = value assert_eq(psr, gsr) @pytest.mark.parametrize( "key, value", [ ((0, 0), 5), ((slice(None), 0), 5), ((slice(None), 0), range(3)), ((slice(None, -1), 0), range(2)), (([0, 1], 0), 5), ], ) def test_dataframe_setitem_iloc(key, value, pdf_gdf): pdf, gdf = pdf_gdf pdf.iloc[key] = value gdf.iloc[key] = value assert_eq(pdf, gdf) @pytest.mark.parametrize( "key, value", [ (("one", "a"), 5), ((slice(None), "a"), 5), ((slice(None), "a"), range(3)), ((slice(None), "a"), [3, 2, 1]), ((slice(None, "two"), "a"), range(2)), ((slice(None, "two"), "a"), [4, 5]), ((["one", "two"], "a"), 5), (("one", "c"), 5), ((["one", "two"], "c"), 5), ((slice(None), "c"), 5), ((slice(None), "c"), range(3)), ((slice(None), "c"), [3, 2, 1]), ((slice(None, "two"), "c"), range(2)), ((slice(None, "two"), "c"), [4, 5]), ], ) def test_dataframe_setitem_loc(key, value, pdf_gdf): pdf, gdf = pdf_gdf pdf.loc[key] = value gdf.loc[key] = value assert_eq(pdf, gdf) @pytest.mark.parametrize( "key, value", [ (("one", "a"), 5), ((slice(None), "a"), range(3)), ((slice(None), "a"), [3, 2, 1]), ], ) def test_dataframe_setitem_loc_empty_df(key, value): pdf, gdf = pd.DataFrame(), cudf.DataFrame() pdf.loc[key] = value gdf.loc[key] = value assert_eq(pdf, gdf, check_dtype=False) @pytest.mark.parametrize( "key,value", [ ((0, 0), 5.0), ((slice(None), 0), 5.0), ((slice(None), 0), np.arange(7, dtype="float64")), ], ) def test_dataframe_setitem_iloc_multiindex(key, value, pdf_gdf_multi): pdf, gdf = pdf_gdf_multi pdf.iloc[key] = value gdf.iloc[key] = value assert_eq(pdf, gdf) def test_boolean_indexing_single_row(pdf_gdf): pdf, gdf = pdf_gdf assert_eq( pdf.loc[[True, False, False], :], gdf.loc[[True, False, False], :] ) def test_iloc_negative_indices(): psr = pd.Series([1, 2, 3, 4, 5]) gsr = cudf.from_pandas(psr) assert_eq(psr.iloc[[-1, -2, -4]], gsr.iloc[[-1, -2, -4]]) def test_out_of_bounds_indexing(): psr = pd.Series([1, 2, 3]) gsr = cudf.from_pandas(psr) assert_exceptions_equal( lambda: psr[[0, 1, 9]], lambda: gsr[[0, 1, 9]], ) assert_exceptions_equal( lambda: psr[[0, 1, -4]], lambda: gsr[[0, 1, -4]], ) assert_exceptions_equal( lambda: psr.__setitem__([0, 1, 9], 2), lambda: gsr.__setitem__([0, 1, 9], 2), ) assert_exceptions_equal( lambda: psr.__setitem__([0, 1, -4], 2), lambda: gsr.__setitem__([0, 1, -4], 2), ) assert_exceptions_equal( lambda: psr[4:6].iloc.__setitem__(-1, 2), lambda: gsr[4:6].iloc.__setitem__(-1, 2), ) assert_exceptions_equal( lambda: psr[4:6].iloc.__setitem__(1, 2), lambda: gsr[4:6].iloc.__setitem__(1, 2), ) def 
test_sliced_indexing(): a = list(range(4, 4 + 150)) b = list(range(0, 0 + 150)) pdf = pd.DataFrame({"a": a, "b": b}) gdf = cudf.DataFrame.from_pandas(pdf) pdf = pdf.set_index("a") gdf = gdf.set_index("a") pidx = pdf.index[:75] gidx = gdf.index[:75] assert_eq(pdf.loc[pidx], gdf.loc[gidx]) @pytest.mark.parametrize("index", [["a"], ["a", "a"], ["a", "a", "b", "c"]]) def test_iloc_categorical_index(index): gdf = cudf.DataFrame({"data": range(len(index))}, index=index) gdf.index = gdf.index.astype("category") pdf = gdf.to_pandas() expect = pdf.iloc[:, 0] got = gdf.iloc[:, 0] assert_eq(expect, got) @pytest.mark.parametrize( "sli", [ slice("2001", "2020"), slice("2001", "2002"), slice("2002", "2001"), slice(None, "2020"), slice("2001", None), ], ) @pytest.mark.parametrize("is_dataframe", [True, False]) def test_loc_datetime_index(sli, is_dataframe): if is_dataframe is True: pd_data = pd.DataFrame( {"a": [1, 2, 3]}, index=pd.Series(["2001", "2009", "2002"], dtype="datetime64[ns]"), ) else: pd_data = pd.Series( [1, 2, 3], pd.Series(["2001", "2009", "2002"], dtype="datetime64[ns]"), ) gd_data = cudf.from_pandas(pd_data) expect = pd_data.loc[sli] got = gd_data.loc[sli] assert_eq(expect, got) @pytest.mark.parametrize( "gdf_kwargs", [ {"data": {"a": range(1000)}}, {"data": {"a": range(1000), "b": range(1000)}}, { "data": { "a": range(20), "b": range(20), "c": ["abc", "def", "xyz", "def", "pqr"] * 4, } }, {"index": [1, 2, 3]}, {"index": range(1000)}, {"columns": ["a", "b", "c", "d"]}, {"columns": ["a"], "index": range(1000)}, {"columns": ["a", "col2", "...col n"], "index": range(1000)}, {"index": cudf.Series(range(1000)).astype("str")}, { "columns": ["a", "b", "c", "d"], "index": cudf.Series(range(1000)).astype("str"), }, ], ) @pytest.mark.parametrize( "slice", [ slice(6, None), # start but no stop, [6:] slice(None, None, 3), # only step, [::3] slice(1, 10, 2), # start, stop, step slice(3, -5, 2), # negative stop slice(-2, -4), # slice is empty slice(-10, -20, -1), # reversed slice slice(None), # slices everything, same as [:] slice(250, 500), slice(250, 251), slice(50), slice(1, 10), slice(10, 20), slice(15, 24), slice(6), ], ) def test_dataframe_sliced(gdf_kwargs, slice): gdf = cudf.DataFrame(**gdf_kwargs) pdf = gdf.to_pandas() actual = gdf[slice] expected = pdf[slice] assert_eq(actual, expected) @pytest.mark.parametrize( "gdf", [ cudf.DataFrame({"a": range(10000)}), cudf.DataFrame( { "a": range(10000), "b": range(10000), "c": range(10000), "d": range(10000), "e": range(10000), "f": range(10000), } ), cudf.DataFrame({"a": range(20), "b": range(20)}), cudf.DataFrame( { "a": range(20), "b": range(20), "c": ["abc", "def", "xyz", "def", "pqr"] * 4, } ), cudf.DataFrame(index=[1, 2, 3]), cudf.DataFrame(index=range(10000)), cudf.DataFrame(columns=["a", "b", "c", "d"]), cudf.DataFrame(columns=["a"], index=range(10000)), cudf.DataFrame(columns=["a", "col2", "...col n"], index=range(10000)), cudf.DataFrame(index=cudf.Series(range(10000)).astype("str")), cudf.DataFrame( columns=["a", "b", "c", "d"], index=cudf.Series(range(10000)).astype("str"), ), ], ) @pytest.mark.parametrize( "slice", [slice(6), slice(1), slice(7), slice(1, 3)], ) def test_dataframe_iloc_index(gdf, slice): pdf = gdf.to_pandas() actual = gdf.iloc[:, slice] expected = pdf.iloc[:, slice] assert_eq(actual, expected) @pytest.mark.parametrize( "data", [ [[0], [1], [2]], [[0, 1], [2, 3], [4, 5]], [[[0, 1], [2]], [[3, 4]], [[5, 6]]], [None, [[0, 1], [2]], [[3, 4], [5, 6]]], [[], [[0, 1], [2]], [[3, 4], [5, 6]]], [[], [["a", "b"], None], [["c", "d"], 
[]]], ], ) @pytest.mark.parametrize( "key", [[], [0], [0, 1], [0, 1, 0], slice(None), slice(0, 2), slice(1, 3)] ) def test_iloc_with_lists(data, key): psr = pd.Series(data) gsr = cudf.Series(data) assert_eq(psr.iloc[key], gsr.iloc[key]) pdf = pd.DataFrame({"a": data, "b": data}) gdf = cudf.DataFrame({"a": data, "b": data}) assert_eq(pdf.iloc[key], gdf.iloc[key]) @pytest.mark.parametrize("key", [5, -10, "0", "a", np.array(5), np.array("a")]) def test_loc_bad_key_type(key): psr = pd.Series([1, 2, 3]) gsr = cudf.from_pandas(psr) assert_exceptions_equal(lambda: psr[key], lambda: gsr[key]) assert_exceptions_equal(lambda: psr.loc[key], lambda: gsr.loc[key]) @pytest.mark.parametrize("key", ["b", 1.0, np.array("b")]) def test_loc_bad_key_type_string_index(key): psr = pd.Series([1, 2, 3], index=["a", "1", "c"]) gsr = cudf.from_pandas(psr) assert_exceptions_equal(lambda: psr[key], lambda: gsr[key]) assert_exceptions_equal(lambda: psr.loc[key], lambda: gsr.loc[key]) def test_loc_zero_dim_array(): psr = pd.Series([1, 2, 3]) gsr = cudf.from_pandas(psr) assert_eq(psr[np.array(0)], gsr[np.array(0)]) assert_eq(psr[np.array([0])[0]], gsr[np.array([0])[0]]) @pytest.mark.parametrize( "arg", [ slice(None), slice((1, 2), None), slice(None, (1, 2)), (1, 1), pytest.param( (1, slice(None)), marks=pytest.mark.xfail( reason="https://github.com/pandas-dev/pandas/issues/46704" ), ), 1, 2, ], ) def test_loc_series_multiindex(arg): gsr = cudf.DataFrame( {"a": [1, 1, 2], "b": [1, 2, 3], "c": ["a", "b", "c"]} ).set_index(["a", "b"])["c"] psr = gsr.to_pandas() assert_eq(psr.loc[arg], gsr.loc[arg]) @pytest.mark.parametrize( "arg", [ slice(None, None, -1), slice(None, -1, -1), slice(4, -1, -1), slice(None, None, -3), slice(None, -1, -3), slice(4, -1, -3), ], ) @pytest.mark.parametrize( "pobj", [pd.DataFrame({"a": [1, 2, 3, 4, 5]}), pd.Series([1, 2, 3, 4, 5])] ) def test_iloc_before_zero_terminate(arg, pobj): gobj = cudf.from_pandas(pobj) assert_eq(pobj.iloc[arg], gobj.iloc[arg]) def test_iloc_decimal(): sr = cudf.Series(["1.00", "2.00", "3.00", "4.00"]).astype( cudf.Decimal64Dtype(scale=2, precision=3) ) got = sr.iloc[[3, 2, 1, 0]] expect = cudf.Series( ["4.00", "3.00", "2.00", "1.00"], ).astype(cudf.Decimal64Dtype(scale=2, precision=3)) assert_eq(expect.reset_index(drop=True), got.reset_index(drop=True)) @pytest.mark.parametrize( ("key, value"), [ ( ([0], ["x", "y"]), [10, 20], ), ( ([0, 2], ["x", "y"]), [[10, 30], [20, 40]], ), ( (0, ["x", "y"]), [10, 20], ), ( ([0, 2], "x"), [10, 20], ), ], ) def test_dataframe_loc_inplace_update(key, value): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) pdf = gdf.to_pandas() actual = gdf.loc[key] = value expected = pdf.loc[key] = value assert_eq(expected, actual) def test_dataframe_loc_inplace_update_string_index(): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=list("abc")) pdf = gdf.to_pandas() actual = gdf.loc[["a"], ["x", "y"]] = [10, 20] expected = pdf.loc[["a"], ["x", "y"]] = [10, 20] assert_eq(expected, actual) @pytest.mark.parametrize( ("key, value"), [ ([0], [10, 20]), ([0, 2], [[10, 30], [20, 40]]), (([0, 2], [0, 1]), [[10, 30], [20, 40]]), (([0, 2], 0), [10, 30]), ((0, [0, 1]), [20, 40]), ], ) def test_dataframe_iloc_inplace_update(key, value): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) pdf = gdf.to_pandas() actual = gdf.iloc[key] = value expected = pdf.iloc[key] = value assert_eq(expected, actual) @pytest.mark.parametrize( "loc_key", [([0, 2], ["x", "y"])], ) @pytest.mark.parametrize( "iloc_key", [[0, 2]], ) @pytest.mark.parametrize( 
("data, index"), [ ( {"x": [10, 20], "y": [30, 40]}, [0, 2], ) ], ) def test_dataframe_loc_iloc_inplace_update_with_RHS_dataframe( loc_key, iloc_key, data, index ): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) pdf = gdf.to_pandas() actual = gdf.loc[loc_key] = cudf.DataFrame(data, index=cudf.Index(index)) expected = pdf.loc[loc_key] = pd.DataFrame(data, index=pd.Index(index)) assert_eq(expected, actual) actual = gdf.iloc[iloc_key] = cudf.DataFrame(data, index=cudf.Index(index)) expected = pdf.iloc[iloc_key] = pd.DataFrame(data, index=pd.Index(index)) assert_eq(expected, actual) def test_dataframe_loc_inplace_update_with_invalid_RHS_df_columns(): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) pdf = gdf.to_pandas() actual = gdf.loc[[0, 2], ["x", "y"]] = cudf.DataFrame( {"b": [10, 20], "y": [30, 40]}, index=cudf.Index([0, 2]) ) expected = pdf.loc[[0, 2], ["x", "y"]] = pd.DataFrame( {"b": [10, 20], "y": [30, 40]}, index=pd.Index([0, 2]) ) assert_eq(expected, actual) @pytest.mark.parametrize( ("key, value"), [ (([0, 2], ["x", "y"]), [[10, 30, 50], [20, 40, 60]]), (([0], ["x", "y"]), [[10], [20]]), ], ) def test_dataframe_loc_inplace_update_shape_mismatch(key, value): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) with pytest.raises(ValueError, match="shape mismatch:"): gdf.loc[key] = value @pytest.mark.parametrize( ("key, value"), [ ([0, 2], [[10, 30, 50], [20, 40, 60]]), ([0], [[10], [20]]), ], ) def test_dataframe_iloc_inplace_update_shape_mismatch(key, value): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) with pytest.raises(ValueError, match="shape mismatch:"): gdf.iloc[key] = value def test_dataframe_loc_inplace_update_shape_mismatch_RHS_df(): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) with pytest.raises(ValueError, match="shape mismatch:"): gdf.loc[([0, 2], ["x", "y"])] = cudf.DataFrame( {"x": [10, 20]}, index=cudf.Index([0, 2]) ) def test_dataframe_iloc_inplace_update_shape_mismatch_RHS_df(): gdf = cudf.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) with pytest.raises(ValueError, match="shape mismatch:"): gdf.iloc[[0, 2]] = cudf.DataFrame( {"x": [10, 20]}, index=cudf.Index([0, 2]) ) @pytest.mark.parametrize( "array,is_error", [ (cupy.arange(20, 40).reshape(-1, 2), False), (cupy.arange(20, 50).reshape(-1, 3), True), (np.arange(20, 40).reshape(-1, 2), False), (np.arange(20, 30).reshape(-1, 1), False), (cupy.arange(20, 30).reshape(-1, 1), False), ], ) def test_dataframe_indexing_setitem_np_cp_array(array, is_error): gdf = cudf.DataFrame({"a": range(10), "b": range(10)}) pdf = gdf.to_pandas() if not is_error: gdf.loc[:, ["a", "b"]] = array pdf.loc[:, ["a", "b"]] = cupy.asnumpy(array) assert_eq(gdf, pdf) else: assert_exceptions_equal( lfunc=pdf.loc.__setitem__, rfunc=gdf.loc.__setitem__, lfunc_args_and_kwargs=( [(slice(None, None, None), ["a", "b"]), cupy.asnumpy(array)], {}, ), rfunc_args_and_kwargs=( [(slice(None, None, None), ["a", "b"]), array], {}, ), ) def test_iloc_single_row_with_nullable_column(): # see https://github.com/rapidsai/cudf/issues/11349 pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.4]}) df = cudf.from_pandas(pdf) df.iloc[0] # before the fix for #11349 this would segfault assert_eq(pdf.iloc[0], df.iloc[0]) def test_loc_single_row_from_slice(): # see https://github.com/rapidsai/cudf/issues/11930 pdf = pd.DataFrame({"a": [10, 20, 30], "b": [1, 2, 3]}).set_index("a") df = cudf.from_pandas(pdf) assert_eq(pdf.loc[5:10], df.loc[5:10]) @pytest.mark.parametrize("indexer", ["loc", "iloc"]) @pytest.mark.parametrize( "mask", 
[[False, True], [False, False, True, True, True]], ids=["too-short", "too-long"], ) def test_boolean_mask_wrong_length(indexer, mask): s = pd.Series([1, 2, 3, 4]) indexee = getattr(s, indexer) with pytest.raises(IndexError): indexee[mask] c = cudf.from_pandas(s) indexee = getattr(c, indexer) with pytest.raises(IndexError): indexee[mask] @pytest.mark.parametrize("indexer", ["loc", "iloc"]) def test_boolean_mask_columns(indexer): df = pd.DataFrame(np.zeros((3, 3))) cdf = cudf.from_pandas(df) mask = [True, False, True] expect = getattr(df, indexer)[:, mask] got = getattr(cdf, indexer)[:, mask] assert_eq(expect, got) @pytest.mark.parametrize("indexer", ["loc", "iloc"]) @pytest.mark.parametrize( "mask", [[False, True], [False, False, True, True, True]], ids=["too-short", "too-long"], ) def test_boolean_mask_columns_wrong_length(indexer, mask): df = pd.DataFrame(np.zeros((3, 3))) cdf = cudf.from_pandas(df) with pytest.raises(IndexError): getattr(df, indexer)[:, mask] with pytest.raises(IndexError): getattr(cdf, indexer)[:, mask] def test_boolean_mask_columns_iloc_series(): df = pd.DataFrame(np.zeros((3, 3))) cdf = cudf.from_pandas(df) mask = pd.Series([True, False, True], dtype=bool) with pytest.raises(NotImplementedError): df.iloc[:, mask] with pytest.raises(NotImplementedError): cdf.iloc[:, mask] @pytest.mark.parametrize("index_type", ["single", "slice"]) def test_loc_timestamp_issue_8585(index_type): # https://github.com/rapidsai/cudf/issues/8585 start = pd.Timestamp( datetime.strptime("2021-03-12 00:00", "%Y-%m-%d %H:%M") ) end = pd.Timestamp(datetime.strptime("2021-03-12 11:00", "%Y-%m-%d %H:%M")) timestamps = pd.date_range(start, end, periods=12) value = np.random.normal(size=12) df = pd.DataFrame(value, index=timestamps, columns=["value"]) cdf = cudf.from_pandas(df) if index_type == "single": index = pd.Timestamp( datetime.strptime("2021-03-12 03:00", "%Y-%m-%d %H:%M") ) elif index_type == "slice": index = slice(start, end, None) else: raise ValueError("Invalid index type") expect = df.loc[index] actual = cdf.loc[index] assert_eq(expect, actual) @pytest.mark.parametrize( "index_type", [ "single", pytest.param( "slice", marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/8585" ), ), pytest.param( "date_range", marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/8585" ), ), ], ) def test_loc_multiindex_timestamp_issue_8585(index_type): # https://github.com/rapidsai/cudf/issues/8585 start = pd.Timestamp( datetime.strptime("2021-03-12 00:00", "%Y-%m-%d %H:%M") ) end = pd.Timestamp(datetime.strptime("2021-03-12 03:00", "%Y-%m-%d %H:%M")) timestamps = pd.date_range(start, end, periods=4) labels = ["A", "B", "C"] index = pd.MultiIndex.from_product( [timestamps, labels], names=["timestamp", "label"] ) value = np.random.normal(size=12) df = pd.DataFrame(value, index=index, columns=["value"]) cdf = cudf.from_pandas(df) start = pd.Timestamp( datetime.strptime("2021-03-12 01:00", "%Y-%m-%d %H:%M") ) end = pd.Timestamp(datetime.strptime("2021-03-12 02:00", "%Y-%m-%d %H:%M")) if index_type == "single": index = pd.Timestamp( datetime.strptime("2021-03-12 03:00", "%Y-%m-%d %H:%M") ) elif index_type == "slice": index = slice(start, end, None) elif index_type == "date_range": index = pd.date_range(start, end, periods=2) else: raise ValueError("Invalid index type") expect = df.loc[index] actual = cdf.loc[index] assert_eq(expect, actual) def test_loc_repeated_index_label_issue_8693(): # https://github.com/rapidsai/cudf/issues/8693 s = pd.Series([1, 2, 3, 4], 
index=[0, 1, 1, 2]) cs = cudf.from_pandas(s) expect = s.loc[1] actual = cs.loc[1] assert_eq(expect, actual) @pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/13268") @pytest.mark.parametrize( "indexer", [(..., 0), (0, ...)], ids=["row_ellipsis", "column_ellipsis"] ) def test_loc_ellipsis_as_slice_issue_13268(indexer): # https://github.com/rapidsai/cudf/issues/13268 df = pd.DataFrame(np.arange(4).reshape(2, 2)) cdf = cudf.from_pandas(df) expect = df.loc[indexer] actual = cdf.loc[indexer] assert_eq(expect, actual) @pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/13269 " "and https://github.com/rapidsai/cudf/issues/13273" ) def test_loc_repeated_column_label_issue_13269(): # https://github.com/rapidsai/cudf/issues/13269 # https://github.com/rapidsai/cudf/issues/13273 df = pd.DataFrame(np.arange(4).reshape(2, 2)) cdf = cudf.from_pandas(df) expect = df.loc[:, [0, 1, 0]] actual = cdf.loc[:, [0, 1, 0]] assert_eq(expect, actual) def test_loc_column_boolean_mask_issue_13270(): # https://github.com/rapidsai/cudf/issues/13270 df = pd.DataFrame(np.arange(4).reshape(2, 2)) cdf = cudf.from_pandas(df) expect = df.loc[:, [True, True]] actual = cdf.loc[:, [True, True]] assert_eq(expect, actual) @pytest.mark.parametrize("indexer", [[1], [0, 2]]) def test_iloc_integer_categorical_issue_13013(indexer): # https://github.com/rapidsai/cudf/issues/13013 s = pd.Series([0, 1, 2]) index = pd.Categorical(indexer) expect = s.iloc[index] c = cudf.from_pandas(s) actual = c.iloc[index] assert_eq(expect, actual) def test_iloc_incorrect_boolean_mask_length_issue_13015(): # https://github.com/rapidsai/cudf/issues/13015 s = pd.Series([0, 1, 2]) with pytest.raises(IndexError): s.iloc[[True, False]] c = cudf.from_pandas(s) with pytest.raises(IndexError): c.iloc[[True, False]] def test_iloc_column_boolean_mask_issue_13265(): # https://github.com/rapidsai/cudf/issues/13265 df = pd.DataFrame(np.arange(4).reshape(2, 2)) cdf = cudf.from_pandas(df) expect = df.iloc[:, [True, True]] actual = cdf.iloc[:, [True, True]] assert_eq(expect, actual) def test_iloc_repeated_column_label_issue_13266(): # https://github.com/rapidsai/cudf/issues/13266 # https://github.com/rapidsai/cudf/issues/13273 df = pd.DataFrame(np.arange(4).reshape(2, 2)) cdf = cudf.from_pandas(df) with pytest.raises(NotImplementedError): cdf.iloc[:, [0, 1, 0]] @pytest.mark.parametrize( "indexer", [ (..., 0), (0, ...), ], ids=["row_ellipsis", "column_ellipsis"], ) def test_iloc_ellipsis_as_slice_issue_13267(indexer): # https://github.com/rapidsai/cudf/issues/13267 df = pd.DataFrame(np.arange(4).reshape(2, 2)) cdf = cudf.from_pandas(df) expect = df.iloc[indexer] actual = cdf.iloc[indexer] assert_eq(expect, actual) @pytest.mark.parametrize( "indexer", [ 0, (slice(None), 0), ([0, 2], 1), (slice(None), slice(None)), (slice(None), [1, 0]), (0, 0), (1, [1, 0]), ([1, 0], 0), ([1, 2], [0, 1]), ], ) def test_iloc_multiindex_lookup_as_label_issue_13515(indexer): # https://github.com/rapidsai/cudf/issues/13515 df = pd.DataFrame( {"a": [1, 1, 3], "b": [2, 3, 4], "c": [1, 6, 7], "d": [1, 8, 9]} ).set_index(["a", "b"]) cdf = cudf.from_pandas(df) expect = df.iloc[indexer] actual = cdf.iloc[indexer] assert_eq(expect, actual) def test_loc_unsorted_index_slice_lookup_keyerror_issue_12833(): # https://github.com/rapidsai/cudf/issues/12833 df = pd.DataFrame({"a": [1, 2, 3]}, index=[7, 0, 4]) cdf = cudf.from_pandas(df) # Check that pandas don't change their mind with pytest.raises(KeyError): df.loc[1:5] with pytest.raises(KeyError): cdf.loc[1:5] 
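# An illustrative, standalone-style sketch (not an upstream test; the helper
# name `_demo_unsorted_loc_slice` is invented here) of the label-slice rule
# that the issue-12833 test above pins down. It relies on this module's
# existing imports (pd, cudf, pytest, assert_eq), and the expectation it
# encodes is hedged: it simply mirrors what the surrounding tests assert,
# namely that cudf follows pandas.
def _demo_unsorted_loc_slice():
    df = pd.DataFrame({"a": [1, 2, 3]}, index=[7, 0, 4])
    cdf = cudf.from_pandas(df)
    # On a monotonic index, out-of-index slice endpoints are allowed, so
    # sorting first makes the same lookup legal in both libraries.
    assert_eq(df.sort_index().loc[1:5], cdf.sort_index().loc[1:5])
    # On the unsorted index, the same label slice raises KeyError in both.
    for frame in (df, cdf):
        with pytest.raises(KeyError):
            frame.loc[1:5]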
@pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/13379") @pytest.mark.parametrize("index", [range(5), list(range(5))]) def test_loc_missing_label_keyerror_issue_13379(index): # https://github.com/rapidsai/cudf/issues/13379 df = pd.DataFrame({"a": index}, index=index) cdf = cudf.from_pandas(df) # Check that pandas don't change their mind with pytest.raises(KeyError): df.loc[[0, 5]] with pytest.raises(KeyError): cdf.loc[[0, 5]] @pytest.mark.parametrize("series", [True, False], ids=["Series", "DataFrame"]) def test_loc_repeated_label_ordering_issue_13658(series): # https://github.com/rapidsai/cudf/issues/13658 values = range(2048) index = [1 for _ in values] if series: frame = cudf.Series(values, index=index) else: frame = cudf.DataFrame({"a": values}, index=index) expect = frame.to_pandas().loc[[1]] actual = frame.loc[[1]] assert_eq(actual, expect) @pytest.mark.parametrize("index", [None, [2, 1, 3, 5, 4]]) def test_loc_bool_key_numeric_index_raises(index): ser = cudf.Series(range(5), index=index) with pytest.raises(KeyError): ser.loc[True] class TestLocIndexWithOrder: # https://github.com/rapidsai/cudf/issues/12833 @pytest.fixture(params=["increasing", "decreasing", "neither"]) def order(self, request): return request.param @pytest.fixture(params=[-1, 1], ids=["reverse", "forward"]) def take_order(self, request): return request.param @pytest.fixture(params=["float", "int", "string", "range"]) def dtype(self, request): return request.param @pytest.fixture def index(self, order, dtype): if dtype == "string": index = ["a", "h", "f", "z"] elif dtype == "int": index = [-1, 10, 7, 14] elif dtype == "float": index = [-1.5, 7.10, 2.4, 11.2] elif dtype == "range": if order == "increasing": return cudf.RangeIndex(2, 10, 3) elif order == "decreasing": return cudf.RangeIndex(10, 1, -3) else: return cudf.RangeIndex(10, 20, 3) else: raise ValueError(f"Unhandled index dtype {dtype}") if order == "decreasing": return sorted(index, reverse=True) elif order == "increasing": return sorted(index) elif order == "neither": return index else: raise ValueError(f"Unhandled index order {order}") @pytest.fixture def df(self, index): return cudf.DataFrame({"a": range(len(index))}, index=index) def test_loc_index_inindex_slice(self, df, take_order): pdf = df.to_pandas() lo = pdf.index[1] hi = pdf.index[-2] expect = pdf.loc[lo:hi:take_order] actual = df.loc[lo:hi:take_order] assert_eq(expect, actual) def test_loc_index_inindex_subset(self, df, take_order): pdf = df.to_pandas() vals = [pdf.index[0], pdf.index[2]][::take_order] expect = pdf.loc[vals] actual = df.loc[vals] assert_eq(expect, actual) def test_loc_index_notinindex_slice( self, request, df, order, dtype, take_order ): pdf = df.to_pandas() lo = pdf.index[1] hi = pdf.index[-2] if isinstance(lo, str): lo = chr(ord(lo) - 1) hi = chr(ord(hi) + 1) else: lo -= 1 hi += 1 if order == "neither" and dtype != "range": with pytest.raises(KeyError): pdf.loc[lo:hi:take_order] with pytest.raises(KeyError): df.loc[lo:hi:take_order] else: expect = pdf.loc[lo:hi:take_order] actual = df.loc[lo:hi:take_order] assert_eq(expect, actual) @pytest.mark.parametrize( "arg", [ (2, ("one", "second")), (slice(None, None, None), ("two", "first")), (1, ("one", "first")), (slice(None, None, None), ("two", "second")), (slice(None, None, None), ("two", "first", "three")), (3, ("two", "first", "three")), (slice(None, None, None), ("two",)), (0, ("two",)), ], ) def test_loc_dataframe_column_multiindex(arg): gdf = cudf.DataFrame( [list("abcd"), list("efgh"), list("ijkl"), 
list("mnop")], columns=cudf.MultiIndex.from_product( [["one", "two"], ["first", "second"], ["three"]] ), ) pdf = gdf.to_pandas() assert_eq(gdf.loc[arg], pdf.loc[arg]) @pytest.mark.parametrize( "arg", [slice(2, 4), slice(2, 5), slice(2.3, 5), slice(4.6, 6)] ) def test_series_iloc_float_int(arg): gs = cudf.Series(range(4), index=[2.0, 3.0, 4.5, 5.5]) ps = gs.to_pandas() actual = gs.loc[arg] expected = ps.loc[arg] assert_eq(actual, expected) def test_iloc_loc_mixed_dtype(): df = cudf.DataFrame({"a": ["a", "b"], "b": [0, 1]}) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(TypeError): df.iloc[0] with pytest.raises(TypeError): df.loc[0] df = df.astype("str") pdf = df.to_pandas() assert_eq(df.iloc[0], pdf.iloc[0]) assert_eq(df.loc[0], pdf.loc[0]) def test_loc_setitem_categorical_integer_not_position_based(): gdf = cudf.DataFrame(range(3), index=cudf.CategoricalIndex([1, 2, 3])) pdf = gdf.to_pandas() gdf.loc[1] = 10 pdf.loc[1] = 10 assert_eq(gdf, pdf) @pytest.mark.parametrize("typ", ["datetime64[ns]", "timedelta64[ns]"]) @pytest.mark.parametrize("idx_method, key", [["iloc", 0], ["loc", "a"]]) def test_series_iloc_scalar_datetimelike_return_pd_scalar( typ, idx_method, key ): obj = cudf.Series([1, 2, 3], index=list("abc"), dtype=typ) with cudf.option_context("mode.pandas_compatible", True): result = getattr(obj, idx_method)[key] expected = getattr(obj.to_pandas(), idx_method)[key] assert result == expected @pytest.mark.parametrize("typ", ["datetime64[ns]", "timedelta64[ns]"]) @pytest.mark.parametrize( "idx_method, row_key, col_key", [["iloc", 0, 0], ["loc", "a", "a"]] ) def test_dataframe_iloc_scalar_datetimelike_return_pd_scalar( typ, idx_method, row_key, col_key ): obj = cudf.DataFrame( [1, 2, 3], index=list("abc"), columns=["a"], dtype=typ ) with cudf.option_context("mode.pandas_compatible", True): result = getattr(obj, idx_method)[row_key, col_key] expected = getattr(obj.to_pandas(), idx_method)[row_key, col_key] assert result == expected @pytest.mark.parametrize("idx_method, key", [["iloc", 0], ["loc", "a"]]) def test_series_iloc_scalar_interval_return_pd_scalar(idx_method, key): iidx = cudf.IntervalIndex.from_breaks([1, 2, 3]) obj = cudf.Series(iidx, index=list("ab")) with cudf.option_context("mode.pandas_compatible", True): result = getattr(obj, idx_method)[key] expected = getattr(obj.to_pandas(), idx_method)[key] assert result == expected @pytest.mark.parametrize( "idx_method, row_key, col_key", [["iloc", 0, 0], ["loc", "a", "a"]] ) def test_dataframe_iloc_scalar_interval_return_pd_scalar( idx_method, row_key, col_key ): iidx = cudf.IntervalIndex.from_breaks([1, 2, 3]) obj = cudf.DataFrame({"a": iidx}, index=list("ab")) with cudf.option_context("mode.pandas_compatible", True): result = getattr(obj, idx_method)[row_key, col_key] expected = getattr(obj.to_pandas(), idx_method)[row_key, col_key] assert result == expected def test_scalar_loc_row_categoricalindex(): df = cudf.DataFrame( range(4), index=cudf.CategoricalIndex(["a", "a", "b", "c"]) ) result = df.loc["a"] expected = df.to_pandas().loc["a"] assert_eq(result, expected)
0
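# Before the next file (test_copying.py): a minimal standalone sketch,
# distilled from the tests that follow, of what cudf's "copy_on_write"
# option changes. The function name `demo_copy_on_write` is invented for
# illustration; the behaviour shown is exactly what the COW tests assert.
import cudf


def demo_copy_on_write():
    with cudf.option_context("copy_on_write", True):
        s = cudf.Series([1, 2, 3])
        view = s.copy(deep=False)  # shares the underlying buffer for now
        s[0] = 99  # writing unlinks `s` from the shared data first
        assert view[0] == 1  # the shallow copy still sees the old value

    with cudf.option_context("copy_on_write", False):
        s = cudf.Series([1, 2, 3])
        view = s.copy(deep=False)
        s[0] = 99
        assert view[0] == 99  # without COW, the write is visible via `view`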
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_copying.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import cupy as cp import numpy as np import pandas as pd import pytest import cudf from cudf import Series from cudf.testing._utils import NUMERIC_TYPES, OTHER_TYPES, assert_eq @pytest.mark.parametrize("dtype", NUMERIC_TYPES + OTHER_TYPES) def test_repeat(dtype): arr = np.random.rand(10) * 10 repeats = np.random.randint(10, size=10) psr = pd.Series(arr).astype(dtype) gsr = cudf.from_pandas(psr) assert_eq(psr.repeat(repeats), gsr.repeat(repeats)) def test_repeat_index(): arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]] psr = pd.MultiIndex.from_arrays(arrays, names=("number", "color")) gsr = cudf.from_pandas(psr) repeats = np.random.randint(10, size=4) assert_eq(psr.repeat(repeats), gsr.repeat(repeats)) def test_repeat_dataframe(): psr = pd.DataFrame({"a": [1, 1, 2, 2]}) gsr = cudf.from_pandas(psr) repeats = np.random.randint(10, size=4) # pd.DataFrame doesn't have repeat() so as a workaround, we are # comparing pd.Series.repeat() with cudf.DataFrame.repeat()['a'] assert_eq(psr["a"].repeat(repeats), gsr.repeat(repeats)["a"]) @pytest.mark.parametrize("dtype", NUMERIC_TYPES) def test_repeat_scalar(dtype): arr = np.random.rand(10) * 10 repeats = 10 psr = pd.Series(arr).astype(dtype) gsr = cudf.from_pandas(psr) assert_eq(psr.repeat(repeats), gsr.repeat(repeats)) def test_null_copy(): col = Series(np.arange(2049)) col[:] = None assert len(col) == 2049 def test_series_setitem_cow_on(): with cudf.option_context("copy_on_write", True): actual = cudf.Series([1, 2, 3, 4, 5]) new_copy = actual.copy(deep=False) actual[1] = 100 assert_eq(actual, cudf.Series([1, 100, 3, 4, 5])) assert_eq(new_copy, cudf.Series([1, 2, 3, 4, 5])) def test_series_setitem_cow_off(): with cudf.option_context("copy_on_write", False): actual = cudf.Series([1, 2, 3, 4, 5]) new_copy = actual.copy(deep=False) actual[1] = 100 assert_eq(actual, cudf.Series([1, 100, 3, 4, 5])) assert_eq(new_copy, cudf.Series([1, 100, 3, 4, 5])) def test_series_setitem_both_slice_cow_on(): with cudf.option_context("copy_on_write", True): actual = cudf.Series([1, 2, 3, 4, 5]) new_copy = actual.copy(deep=False) actual[slice(0, 2, 1)] = 100 assert_eq(actual, cudf.Series([100, 100, 3, 4, 5])) assert_eq(new_copy, cudf.Series([1, 2, 3, 4, 5])) new_copy[slice(2, 4, 1)] = 300 assert_eq(actual, cudf.Series([100, 100, 3, 4, 5])) assert_eq(new_copy, cudf.Series([1, 2, 300, 300, 5])) def test_series_setitem_both_slice_cow_off(): with cudf.option_context("copy_on_write", False): actual = cudf.Series([1, 2, 3, 4, 5]) new_copy = actual.copy(deep=False) actual[slice(0, 2, 1)] = 100 assert_eq(actual, cudf.Series([100, 100, 3, 4, 5])) assert_eq(new_copy, cudf.Series([100, 100, 3, 4, 5])) new_copy[slice(2, 4, 1)] = 300 assert_eq(actual, cudf.Series([100, 100, 300, 300, 5])) assert_eq(new_copy, cudf.Series([100, 100, 300, 300, 5])) def test_series_setitem_partial_slice_cow_on(): with cudf.option_context("copy_on_write", True): actual = cudf.Series([1, 2, 3, 4, 5]) new_copy = actual.copy(deep=False) new_copy[slice(2, 4, 1)] = 300 assert_eq(actual, cudf.Series([1, 2, 3, 4, 5])) assert_eq(new_copy, cudf.Series([1, 2, 300, 300, 5])) new_slice = actual[2:] # TODO: when COW and spilling has been unified, find a clean way to # test this without accessing the internal attributes _base and _ptr assert ( new_slice._column.base_data._base._ptr == actual._column.base_data._base._ptr ) new_slice[0:2] = 10 assert_eq(new_slice, cudf.Series([10, 10, 5], index=[2, 3, 4])) assert_eq(actual, cudf.Series([1, 2, 3, 4, 5])) def 
test_series_setitem_partial_slice_cow_off():
    with cudf.option_context("copy_on_write", False):
        actual = cudf.Series([1, 2, 3, 4, 5])
        new_copy = actual.copy(deep=False)

        new_copy[slice(2, 4, 1)] = 300
        assert_eq(actual, cudf.Series([1, 2, 300, 300, 5]))
        assert_eq(new_copy, cudf.Series([1, 2, 300, 300, 5]))

        new_slice = actual[2:]
        assert (
            new_slice._column.base_data._ptr
            == actual._column.base_data._ptr
        )
        new_slice[0:2] = 10
        assert_eq(new_slice, cudf.Series([10, 10, 5], index=[2, 3, 4]))
        assert_eq(actual, cudf.Series([1, 2, 10, 10, 5]))


def test_multiple_series_cow():
    with cudf.option_context("copy_on_write", True):
        # Verify that constructing, modifying, and deleting
        # multiple copies of a series preserves
        # the data appropriately when COW is enabled.
        s = cudf.Series([10, 20, 30, 40, 50])
        s1 = s.copy(deep=False)
        s2 = s.copy(deep=False)
        s3 = s.copy(deep=False)
        s4 = s2.copy(deep=False)
        s5 = s4.copy(deep=False)
        s6 = s3.copy(deep=False)

        s1[0:3] = 10000
        # s1 is unlinked from the actual data in s and then
        # modified. All the other copies should still
        # contain the original data.
        assert_eq(s1, cudf.Series([10000, 10000, 10000, 40, 50]))
        for ser in [s, s2, s3, s4, s5, s6]:
            assert_eq(ser, cudf.Series([10, 20, 30, 40, 50]))

        s6[0:3] = 3000
        # s6 is unlinked from the actual data in s and then
        # modified. All the other copies should still
        # contain the original data.
        assert_eq(s1, cudf.Series([10000, 10000, 10000, 40, 50]))
        assert_eq(s6, cudf.Series([3000, 3000, 3000, 40, 50]))
        for ser in [s2, s3, s4, s5]:
            assert_eq(ser, cudf.Series([10, 20, 30, 40, 50]))

        s2[1:4] = 4000
        # s2 is unlinked from the actual data in s and then
        # modified. All the other copies should still
        # contain the original data.
        assert_eq(s2, cudf.Series([10, 4000, 4000, 4000, 50]))
        assert_eq(s1, cudf.Series([10000, 10000, 10000, 40, 50]))
        assert_eq(s6, cudf.Series([3000, 3000, 3000, 40, 50]))
        for ser in [s3, s4, s5]:
            assert_eq(ser, cudf.Series([10, 20, 30, 40, 50]))

        s4[2:4] = 5000
        # s4 is unlinked from the actual data in s and then
        # modified. All the other copies should still
        # contain the original data.
        assert_eq(s4, cudf.Series([10, 20, 5000, 5000, 50]))
        assert_eq(s2, cudf.Series([10, 4000, 4000, 4000, 50]))
        assert_eq(s1, cudf.Series([10000, 10000, 10000, 40, 50]))
        assert_eq(s6, cudf.Series([3000, 3000, 3000, 40, 50]))
        for ser in [s3, s5]:
            assert_eq(ser, cudf.Series([10, 20, 30, 40, 50]))

        s5[2:4] = 6000
        # s5 is unlinked from the actual data in s and then
        # modified. All the other copies should still
        # contain the original data.
        assert_eq(s5, cudf.Series([10, 20, 6000, 6000, 50]))
        assert_eq(s4, cudf.Series([10, 20, 5000, 5000, 50]))
        assert_eq(s2, cudf.Series([10, 4000, 4000, 4000, 50]))
        assert_eq(s1, cudf.Series([10000, 10000, 10000, 40, 50]))
        assert_eq(s6, cudf.Series([3000, 3000, 3000, 40, 50]))
        for ser in [s3]:
            assert_eq(ser, cudf.Series([10, 20, 30, 40, 50]))

        s7 = s5.copy(deep=False)
        assert_eq(s7, cudf.Series([10, 20, 6000, 6000, 50]))
        s7[1:3] = 55
        # Making a copy of s5 (s7) and modifying it shouldn't
        # touch the data in any other series.
        assert_eq(s7, cudf.Series([10, 55, 55, 6000, 50]))
        assert_eq(s4, cudf.Series([10, 20, 5000, 5000, 50]))
        assert_eq(s2, cudf.Series([10, 4000, 4000, 4000, 50]))
        assert_eq(s1, cudf.Series([10000, 10000, 10000, 40, 50]))
        assert_eq(s6, cudf.Series([3000, 3000, 3000, 40, 50]))
        for ser in [s3]:
            assert_eq(ser, cudf.Series([10, 20, 30, 40, 50]))

        # Deleting any of the following series objects
        # shouldn't delete the rest of the weakly referenced data
        # elsewhere.
del s2 assert_eq(s1, cudf.Series([10000, 10000, 10000, 40, 50])) assert_eq(s3, cudf.Series([10, 20, 30, 40, 50])) assert_eq(s4, cudf.Series([10, 20, 5000, 5000, 50])) assert_eq(s5, cudf.Series([10, 20, 6000, 6000, 50])) assert_eq(s6, cudf.Series([3000, 3000, 3000, 40, 50])) assert_eq(s7, cudf.Series([10, 55, 55, 6000, 50])) del s4 del s1 assert_eq(s3, cudf.Series([10, 20, 30, 40, 50])) assert_eq(s5, cudf.Series([10, 20, 6000, 6000, 50])) assert_eq(s6, cudf.Series([3000, 3000, 3000, 40, 50])) assert_eq(s7, cudf.Series([10, 55, 55, 6000, 50])) del s del s6 assert_eq(s3, cudf.Series([10, 20, 30, 40, 50])) assert_eq(s5, cudf.Series([10, 20, 6000, 6000, 50])) assert_eq(s7, cudf.Series([10, 55, 55, 6000, 50])) del s5 assert_eq(s3, cudf.Series([10, 20, 30, 40, 50])) assert_eq(s7, cudf.Series([10, 55, 55, 6000, 50])) del s3 assert_eq(s7, cudf.Series([10, 55, 55, 6000, 50])) def test_series_zero_copy_cow_on(): with cudf.option_context("copy_on_write", True): s = cudf.Series([1, 2, 3, 4, 5]) s1 = s.copy(deep=False) cp_array = cp.asarray(s) # Ensure all original data & zero-copied # data is same. assert_eq(s, cudf.Series([1, 2, 3, 4, 5])) assert_eq(s1, cudf.Series([1, 2, 3, 4, 5])) assert_eq(cp_array, cp.array([1, 2, 3, 4, 5])) cp_array[0:3] = 10 # Modifying a zero-copied array should only # modify `s` and will leave rest of the copies # untouched. assert_eq(s, cudf.Series([10, 10, 10, 4, 5])) assert_eq(s1, cudf.Series([1, 2, 3, 4, 5])) assert_eq(cp_array, cp.array([10, 10, 10, 4, 5])) s2 = cudf.Series(cp_array) assert_eq(s2, cudf.Series([10, 10, 10, 4, 5])) s3 = s2.copy(deep=False) cp_array[0] = 20 # Modifying a zero-copied array should modify # `s2` and `s` only. Because `cp_array` # is zero-copy shared with `s` & `s2`. assert_eq(s, cudf.Series([20, 10, 10, 4, 5])) assert_eq(s1, cudf.Series([1, 2, 3, 4, 5])) assert_eq(cp_array, cp.array([20, 10, 10, 4, 5])) assert_eq(s2, cudf.Series([20, 10, 10, 4, 5])) assert_eq(s3, cudf.Series([10, 10, 10, 4, 5])) s4 = cudf.Series([10, 20, 30, 40, 50]) s5 = cudf.Series(s4) assert_eq(s5, cudf.Series([10, 20, 30, 40, 50])) s5[0:2] = 1 # Modifying `s5` should also modify `s4` # because they are zero-copied. assert_eq(s5, cudf.Series([1, 1, 30, 40, 50])) assert_eq(s4, cudf.Series([1, 1, 30, 40, 50])) def test_series_zero_copy_cow_off(): with cudf.option_context("copy_on_write", False): s = cudf.Series([1, 2, 3, 4, 5]) s1 = s.copy(deep=False) cp_array = cp.asarray(s) # Ensure all original data & zero-copied # data is same. assert_eq(s, cudf.Series([1, 2, 3, 4, 5])) assert_eq(s1, cudf.Series([1, 2, 3, 4, 5])) assert_eq(cp_array, cp.array([1, 2, 3, 4, 5])) cp_array[0:3] = 10 # When COW is off, modifying a zero-copied array # will need to modify `s` & `s1` since they are # shallow copied. assert_eq(s, cudf.Series([10, 10, 10, 4, 5])) assert_eq(s1, cudf.Series([10, 10, 10, 4, 5])) assert_eq(cp_array, cp.array([10, 10, 10, 4, 5])) s2 = cudf.Series(cp_array) assert_eq(s2, cudf.Series([10, 10, 10, 4, 5])) s3 = s2.copy(deep=False) cp_array[0] = 20 # Modifying `cp_array`, will propagate the changes # across all Series objects, because they are # either shallow copied or zero-copied. 
assert_eq(s, cudf.Series([20, 10, 10, 4, 5])) assert_eq(s1, cudf.Series([20, 10, 10, 4, 5])) assert_eq(cp_array, cp.array([20, 10, 10, 4, 5])) assert_eq(s2, cudf.Series([20, 10, 10, 4, 5])) assert_eq(s3, cudf.Series([20, 10, 10, 4, 5])) s4 = cudf.Series([10, 20, 30, 40, 50]) s5 = cudf.Series(s4) assert_eq(s5, cudf.Series([10, 20, 30, 40, 50])) s5[0:2] = 1 # Modifying `s5` should also modify `s4` # because they are zero-copied. assert_eq(s5, cudf.Series([1, 1, 30, 40, 50])) assert_eq(s4, cudf.Series([1, 1, 30, 40, 50])) @pytest.mark.parametrize("copy_on_write", [True, False]) def test_series_str_copy(copy_on_write): original_cow_setting = cudf.get_option("copy_on_write") cudf.set_option("copy_on_write", copy_on_write) s = cudf.Series(["a", "b", "c", "d", "e"]) s1 = s.copy(deep=True) s2 = s.copy(deep=True) assert_eq(s, cudf.Series(["a", "b", "c", "d", "e"])) assert_eq(s1, cudf.Series(["a", "b", "c", "d", "e"])) assert_eq(s2, cudf.Series(["a", "b", "c", "d", "e"])) s[0:3] = "abc" assert_eq(s, cudf.Series(["abc", "abc", "abc", "d", "e"])) assert_eq(s1, cudf.Series(["a", "b", "c", "d", "e"])) assert_eq(s2, cudf.Series(["a", "b", "c", "d", "e"])) s2[1:4] = "xyz" assert_eq(s, cudf.Series(["abc", "abc", "abc", "d", "e"])) assert_eq(s1, cudf.Series(["a", "b", "c", "d", "e"])) assert_eq(s2, cudf.Series(["a", "xyz", "xyz", "xyz", "e"])) cudf.set_option("copy_on_write", original_cow_setting) @pytest.mark.parametrize("copy_on_write", [True, False]) def test_series_cat_copy(copy_on_write): original_cow_setting = cudf.get_option("copy_on_write") cudf.set_option("copy_on_write", copy_on_write) s = cudf.Series([10, 20, 30, 40, 50], dtype="category") s1 = s.copy(deep=True) s2 = s1.copy(deep=True) s3 = s1.copy(deep=True) s[0] = 50 assert_eq(s, cudf.Series([50, 20, 30, 40, 50], dtype=s.dtype)) assert_eq(s1, cudf.Series([10, 20, 30, 40, 50], dtype="category")) assert_eq(s2, cudf.Series([10, 20, 30, 40, 50], dtype="category")) assert_eq(s3, cudf.Series([10, 20, 30, 40, 50], dtype="category")) s2[3] = 10 s3[2:5] = 20 assert_eq(s, cudf.Series([50, 20, 30, 40, 50], dtype=s.dtype)) assert_eq(s1, cudf.Series([10, 20, 30, 40, 50], dtype=s.dtype)) assert_eq(s2, cudf.Series([10, 20, 30, 10, 50], dtype=s.dtype)) assert_eq(s3, cudf.Series([10, 20, 20, 20, 20], dtype=s.dtype)) cudf.set_option("copy_on_write", original_cow_setting) def test_dataframe_cow_slice_setitem(): with cudf.option_context("copy_on_write", True): df = cudf.DataFrame( {"a": [10, 11, 12, 13, 14], "b": [20, 30, 40, 50, 60]} ) slice_df = df[1:4] assert_eq( slice_df, cudf.DataFrame( {"a": [11, 12, 13], "b": [30, 40, 50]}, index=[1, 2, 3] ), ) slice_df["a"][2] = 1111 assert_eq( slice_df, cudf.DataFrame( {"a": [11, 1111, 13], "b": [30, 40, 50]}, index=[1, 2, 3] ), ) assert_eq( df, cudf.DataFrame( {"a": [10, 11, 12, 13, 14], "b": [20, 30, 40, 50, 60]} ), )
0
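# Before the next file (test_dropna.py): a small standalone sketch of the
# dropna knobs those tests exercise -- how="any"/"all" and thresh. The
# function name `demo_dropna` is invented for illustration; the counts are
# easy to verify by hand.
import cudf


def demo_dropna():
    gdf = cudf.DataFrame({"a": [1, None, None], "b": [4, 5, None]})
    # "any" keeps only rows with no nulls; here just the first row.
    assert len(gdf.dropna(how="any")) == 1
    # "all" drops only rows that are entirely null; here the last row.
    assert len(gdf.dropna(how="all")) == 2
    # thresh keeps rows with at least that many non-null values.
    assert len(gdf.dropna(thresh=2)) == 1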
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_dropna.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION.

import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.testing._utils import (
    _create_pandas_series_float64_default,
    assert_eq,
)


@pytest.mark.parametrize(
    "data",
    [
        [],
        [1.0, 2, None, 4],
        ["one", "two", "three", "four"],
        pd.Series(["a", "b", "c", "d"], dtype="category"),
        pd.Series(pd.date_range("2010-01-01", "2010-01-04")),
    ],
)
@pytest.mark.parametrize("nulls", ["one", "some", "all", "none"])
@pytest.mark.parametrize("inplace", [True, False])
def test_dropna_series(data, nulls, inplace):
    psr = _create_pandas_series_float64_default(data)

    if len(data) > 0:
        if nulls == "one":
            p = np.random.randint(0, 4)
            psr[p] = None
        elif nulls == "some":
            p1, p2 = np.random.randint(0, 4, (2,))
            psr[p1] = None
            psr[p2] = None
        elif nulls == "all":
            psr[:] = None

    gsr = cudf.from_pandas(psr)

    check_dtype = True
    if gsr.null_count == len(gsr):
        check_dtype = False

    # Exercise the inplace path as well; dropna(inplace=True) returns None,
    # so in that case compare the mutated objects themselves (mirroring
    # test_dropna_dataframe below).
    expected = psr.dropna(inplace=inplace)
    actual = gsr.dropna(inplace=inplace)

    if inplace:
        expected = psr
        actual = gsr

    assert_eq(expected, actual, check_dtype=check_dtype)


@pytest.mark.parametrize(
    "data",
    [
        {"a": [1, 2, None]},
        {"a": [1, 2, None], "b": [3, 4, 5]},
        {"a": [1, 2, None], "b": [3, 4, None]},
        {"a": [None, 1, 2], "b": [1, 2, None]},
        {"a": [None, 1, None], "b": [None, 2, None]},
        {"a": [None, None, 1], "b": [1, 2, None]},
        {"a": ["d", "e", "f"], "b": ["a", None, "c"]},
    ],
)
@pytest.mark.parametrize("how", ["all", "any"])
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("inplace", [True, False])
def test_dropna_dataframe(data, how, axis, inplace):
    pdf = pd.DataFrame(data)
    gdf = cudf.from_pandas(pdf)

    expected = pdf.dropna(axis=axis, how=how, inplace=inplace)
    actual = gdf.dropna(axis=axis, how=how, inplace=inplace)

    if inplace:
        expected = pdf
        actual = gdf

    assert_eq(expected, actual)


@pytest.mark.parametrize("how", ["all", "any"])
@pytest.mark.parametrize(
    "data",
    [
        {
            "a": cudf.Series([None, None, None], dtype="float64"),
            "b": cudf.Series([1, 2, None]),
        },
        {
            "a": cudf.Series([np.nan, np.nan, np.nan], dtype="float64"),
            "b": cudf.Series([1, 2, None]),
        },
        cudf.Series([None, None, None], dtype="object"),
    ],
)
@pytest.mark.parametrize("axis", [0, 1])
def test_dropna_with_all_nulls(how, data, axis):
    gdf = cudf.DataFrame({"a": data})
    pdf = gdf.to_pandas()

    assert_eq(pdf.dropna(axis=axis, how=how), gdf.dropna(axis=axis, how=how))


def test_dropna_nan_as_null():
    sr = cudf.Series([1.0, 2.0, np.nan, None], nan_as_null=False)
    assert_eq(sr.dropna(), sr[:2])
    sr = sr.nans_to_nulls()
    assert_eq(sr.dropna(), sr[:2])

    df = cudf.DataFrame(
        {
            "a": cudf.Series([1.0, 2.0, np.nan, None], nan_as_null=False),
            "b": cudf.Series([1, 2, 3, 4]),
        }
    )

    got = df.dropna()
    expected = df[:2]
    assert_eq(expected, got)

    df = df.nans_to_nulls()
    got = df.dropna()
    expected = df[:2]
    assert_eq(expected, got)


@pytest.mark.parametrize(
    "data,subset",
    [
        ({"a": [1, None], "b": [1, 2]}, ["a"]),
        ({"a": [1, None], "b": [1, 2]}, ["b"]),
        ({"a": [1, None], "b": [1, 2]}, []),
        ({"a": [1, 2], "b": [1, 2]}, ["b"]),
        ({"a": [1, 2, None], "b": [1, None, 2]}, ["a"]),
        ({"a": [1, 2, None], "b": [1, None, 2]}, ["b"]),
        ({"a": [1, 2, None], "b": [1, None, 2]}, ["a", "b"]),
    ],
)
def test_dropna_subset_rows(data, subset):
    pdf = pd.DataFrame(data)
    gdf = cudf.from_pandas(pdf)

    assert_eq(pdf.dropna(subset=subset), gdf.dropna(subset=subset))


@pytest.mark.parametrize(
    "data, subset",
    [
        ({"a": [1, None], "b": [1, 2]}, [0]),
        ({"a": [1, None], "b": [1, 2]}, [1]),
        ({"a": [1, None], "b": [1, 2]}, []),
        ({"a": [1, 2], "b": [1, 2]}, [0]),
        ({"a": [1, 2], "b": [None, 2], "c": [3, None]}, [0]),
({"a": [1, 2], "b": [None, 2], "c": [3, None]}, [1]), ({"a": [1, 2], "b": [None, 2], "c": [3, None]}, [0, 1]), ], ) def test_dropna_subset_cols(data, subset): pdf = pd.DataFrame(data) gdf = cudf.from_pandas(pdf) assert_eq( pdf.dropna(axis=1, subset=subset), gdf.dropna(axis=1, subset=subset) ) # TODO: can't test with subset=[] below since Pandas # returns empty DF when both subset=[] and thresh are specified. @pytest.mark.parametrize("thresh", [0, 1, 2]) @pytest.mark.parametrize("subset", [None, ["a"], ["b"], ["a", "b"]]) def test_dropna_thresh(thresh, subset): pdf = pd.DataFrame({"a": [1, 2, None, None], "b": [1, 2, 3, None]}) gdf = cudf.from_pandas(pdf) assert_eq( pdf.dropna(axis=0, thresh=thresh, subset=subset), gdf.dropna(axis=0, thresh=thresh, subset=subset), ) @pytest.mark.parametrize("thresh", [0, 1, 2]) @pytest.mark.parametrize("subset", [None, [0], [1], [0, 1]]) @pytest.mark.parametrize("inplace", [True, False]) def test_dropna_thresh_cols(thresh, subset, inplace): pdf = pd.DataFrame( {"a": [1, 2], "b": [3, 4], "c": [5, None], "d": [np.nan, np.nan]} ) gdf = cudf.from_pandas(pdf) expected = pdf.dropna( axis=1, thresh=thresh, subset=subset, inplace=inplace ) actual = gdf.dropna(axis=1, thresh=thresh, subset=subset, inplace=inplace) if inplace: expected = pdf actual = gdf assert_eq( expected, actual, ) @pytest.mark.parametrize( "data", [ { "key": [1, 2, 10], "val": cudf.Series([np.nan, 3, 1], nan_as_null=False), "abc": [np.nan, None, 1], }, { "key": [None, 2, 1], "val": cudf.Series([3, np.nan, 0.1], nan_as_null=True), "abc": [None, 1, None], }, ], ) @pytest.mark.parametrize("axis", [0, 1]) def test_dropna_dataframe_np_nan(data, axis): gdf = cudf.DataFrame(data) pd_data = { key: value.to_pandas() if isinstance(value, cudf.Series) else value for key, value in data.items() } pdf = pd.DataFrame(pd_data) assert_eq(pdf.dropna(axis=axis), gdf.dropna(axis=axis), check_dtype=False) @pytest.mark.parametrize( "data, dtype", [ ([1, float("nan"), 2], "float64"), (["x", None, "y"], "str"), (["x", None, "y"], "category"), (["2020-01-20", pd.NaT, "2020-03-15"], "datetime64[ns]"), (["1s", pd.NaT, "3d"], "timedelta64[ns]"), ], ) def test_dropna_index(data, dtype): pi = pd.Index(data, dtype=dtype) gi = cudf.from_pandas(pi) expect = pi.dropna() got = gi.dropna() assert_eq(expect, got) @pytest.mark.parametrize("data", [[[1, None, 2], [None, None, 2]]]) @pytest.mark.parametrize("how", ["all", "any"]) def test_dropna_multiindex(data, how): pi = pd.MultiIndex.from_arrays(data) gi = cudf.from_pandas(pi) expect = pi.dropna(how) got = gi.dropna(how) with pytest.raises(AssertionError, match="different"): # pandas-gh44792. Pandas infers the dtypes as (int64, int64), though # int64 doesn't really store null/nans. The dtype propagates to the # result of dropna. cuDF infers the dtypes as (float, float), which # differs from pandas. assert_eq(expect, got) @pytest.mark.parametrize( "data", [ [ [pd.Timestamp("2020-01-01"), pd.NaT, pd.Timestamp("2020-02-01")], [pd.NaT, pd.NaT, pd.Timestamp("2020-03-01")], ], [ [pd.Timestamp("2020-01-01"), pd.NaT, pd.Timestamp("2020-02-01")], [np.nan, np.nan, 1.0], ], [[1.0, np.nan, 2.0], [np.nan, np.nan, 1.0]], ], ) @pytest.mark.parametrize("how", ["all", "any"]) def test_dropna_multiindex_2(data, how): pi = pd.MultiIndex.from_arrays(data) gi = cudf.from_pandas(pi) expect = pi.dropna(how) got = gi.dropna(how) assert_eq(expect, got)
0
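# Before the next file (test_reshape.py): a standalone sketch of the one
# deliberate dtype difference its melt tests normalise away -- cudf.melt
# yields a categorical "variable" column where pandas yields object dtype.
# The function name `demo_melt_variable_dtype` is invented for illustration;
# the cast-then-compare pattern is exactly what the tests below do.
import pandas as pd

import cudf
from cudf.testing._utils import assert_eq


def demo_melt_variable_dtype():
    pdf = pd.DataFrame({"id": [1, 2], "x": [3.0, 4.0], "y": [5.0, 6.0]})
    gdf = cudf.from_pandas(pdf)
    expect = pd.melt(pdf, id_vars=["id"], value_vars=["x", "y"])
    got = cudf.melt(gdf, id_vars=["id"], value_vars=["x", "y"])
    # Cast the pandas result the same way the tests below do, then compare.
    expect["variable"] = expect["variable"].astype("category")
    assert_eq(expect, got)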
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_reshape.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. import re from itertools import chain import numpy as np import pandas as pd import pytest import cudf from cudf import melt as cudf_melt from cudf.core.buffer.spill_manager import get_global_manager from cudf.testing._utils import ( ALL_TYPES, DATETIME_TYPES, NUMERIC_TYPES, assert_eq, ) pytest_xfail = pytest.mark.xfail pytestmark = pytest.mark.spilling # If spilling is enabled globally, we skip many test permutations # to reduce running time. if get_global_manager() is not None: ALL_TYPES = ["float32"] # noqa: F811 DATETIME_TYPES = ["datetime64[ms]"] # noqa: F811 NUMERIC_TYPES = ["float32"] # noqa: F811 # To save time, we skip tests marked "pytest.mark.xfail" pytest_xfail = pytest.mark.skipif @pytest.mark.parametrize("num_id_vars", [0, 1, 2]) @pytest.mark.parametrize("num_value_vars", [0, 1, 2]) @pytest.mark.parametrize("num_rows", [1, 2, 100]) @pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES) @pytest.mark.parametrize("nulls", ["none", "some", "all"]) def test_melt(nulls, num_id_vars, num_value_vars, num_rows, dtype): if dtype not in ["float32", "float64"] and nulls in ["some", "all"]: pytest.skip(reason="nulls not supported in dtype: " + dtype) pdf = pd.DataFrame() id_vars = [] for i in range(num_id_vars): colname = "id" + str(i) data = np.random.randint(0, 26, num_rows).astype(dtype) if nulls == "some": idx = np.random.choice( num_rows, size=int(num_rows / 2), replace=False ) data[idx] = np.nan elif nulls == "all": data[:] = np.nan pdf[colname] = data id_vars.append(colname) value_vars = [] for i in range(num_value_vars): colname = "val" + str(i) data = np.random.randint(0, 26, num_rows).astype(dtype) if nulls == "some": idx = np.random.choice( num_rows, size=int(num_rows / 2), replace=False ) data[idx] = np.nan elif nulls == "all": data[:] = np.nan pdf[colname] = data value_vars.append(colname) gdf = cudf.from_pandas(pdf) got = cudf_melt(frame=gdf, id_vars=id_vars, value_vars=value_vars) got_from_melt_method = gdf.melt(id_vars=id_vars, value_vars=value_vars) expect = pd.melt(frame=pdf, id_vars=id_vars, value_vars=value_vars) # pandas' melt makes the 'variable' column of 'object' type (string) # cuDF's melt makes it Categorical because it doesn't support strings expect["variable"] = expect["variable"].astype("category") assert_eq(expect, got) assert_eq(expect, got_from_melt_method) def test_melt_many_columns(): mydict = {"id": ["foobar"]} for i in range(1, 1942): mydict[f"d_{i}"] = i df = pd.DataFrame(mydict) grid_df = pd.melt(df, id_vars=["id"], var_name="d", value_name="sales") df_d = cudf.DataFrame(mydict) grid_df_d = cudf.melt( df_d, id_vars=["id"], var_name="d", value_name="sales" ) grid_df_d["d"] = grid_df_d["d"].astype("str") assert_eq(grid_df, grid_df_d) @pytest.mark.parametrize("num_cols", [1, 2, 10]) @pytest.mark.parametrize("num_rows", [1, 2, 1000]) @pytest.mark.parametrize( "dtype", list(chain(NUMERIC_TYPES, DATETIME_TYPES, ["str"])) ) @pytest.mark.parametrize("nulls", ["none", "some"]) def test_df_stack(nulls, num_cols, num_rows, dtype): if dtype not in ["float32", "float64"] and nulls in ["some"]: pytest.skip(reason="nulls not supported in dtype: " + dtype) pdf = pd.DataFrame() for i in range(num_cols): colname = str(i) data = np.random.randint(0, 26, num_rows).astype(dtype) if nulls == "some": idx = np.random.choice( num_rows, size=int(num_rows / 2), replace=False ) data[idx] = np.nan pdf[colname] = data gdf = cudf.from_pandas(pdf) got = gdf.stack() expect = pdf.stack() assert_eq(expect, got) def 
test_df_stack_reset_index(): df = cudf.DataFrame( { "a": [1, 2, 3, 4], "b": [10, 11, 12, 13], "c": ["ab", "cd", None, "gh"], } ) df = df.set_index(["a", "b"]) pdf = df.to_pandas() expected = pdf.stack() actual = df.stack() assert_eq(expected, actual) expected = expected.reset_index() actual = actual.reset_index() assert_eq(expected, actual) @pytest.mark.parametrize( "columns", [ pd.MultiIndex.from_tuples( [("A", "cat"), ("A", "dog"), ("B", "cat"), ("B", "dog")], names=["letter", "animal"], ), pd.MultiIndex.from_tuples( [("A", "cat"), ("B", "bird"), ("A", "dog"), ("B", "dog")], names=["letter", "animal"], ), ], ) @pytest.mark.parametrize( "level", [ -1, 0, 1, "letter", "animal", [0, 1], [1, 0], ["letter", "animal"], ["animal", "letter"], ], ) @pytest.mark.parametrize( "index", [ pd.RangeIndex(2, name="range"), pd.Index([9, 8], name="myindex"), pd.MultiIndex.from_arrays( [ ["A", "B"], [101, 102], ], names=["first", "second"], ), ], ) @pytest.mark.parametrize("dropna", [True, False]) def test_df_stack_multiindex_column_axis(columns, index, level, dropna): if isinstance(level, list) and len(level) > 1 and not dropna: pytest.skip( "Stacking multiple levels with dropna==False is unsupported." ) pdf = pd.DataFrame( data=[[1, 2, 3, 4], [2, 4, 6, 8]], columns=columns, index=index ) gdf = cudf.from_pandas(pdf) got = gdf.stack(level=level, dropna=dropna) expect = pdf.stack(level=level, dropna=dropna) assert_eq(expect, got, check_dtype=False) def test_df_stack_mixed_dtypes(): pdf = pd.DataFrame( { "A": pd.Series([1, 2, 3], dtype="f4"), "B": pd.Series([4, 5, 6], dtype="f8"), } ) gdf = cudf.from_pandas(pdf) got = gdf.stack() expect = pdf.stack() assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize("level", [["animal", "hair_length"], [1, 2]]) def test_df_stack_multiindex_column_axis_pd_example(level): columns = pd.MultiIndex.from_tuples( [ ("A", "cat", "long"), ("B", "cat", "long"), ("A", "dog", "short"), ("B", "dog", "short"), ], names=["exp", "animal", "hair_length"], ) df = pd.DataFrame(np.random.randn(4, 4), columns=columns) expect = df.stack(level=level) got = cudf.from_pandas(df).stack(level=level) assert_eq(expect, got) @pytest.mark.parametrize("num_rows", [1, 2, 10, 1000]) @pytest.mark.parametrize("num_cols", [1, 2, 10]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + DATETIME_TYPES + ["category"] ) @pytest.mark.parametrize("nulls", ["none", "some"]) def test_interleave_columns(nulls, num_cols, num_rows, dtype): if dtype not in ["float32", "float64"] and nulls in ["some"]: pytest.skip(reason="nulls not supported in dtype: " + dtype) pdf = pd.DataFrame(dtype=dtype) for i in range(num_cols): colname = str(i) data = pd.Series(np.random.randint(0, 26, num_rows)).astype(dtype) if nulls == "some": idx = np.random.choice( num_rows, size=int(num_rows / 2), replace=False ) data[idx] = np.nan pdf[colname] = data gdf = cudf.from_pandas(pdf) if dtype == "category": with pytest.raises(ValueError): assert gdf.interleave_columns() else: got = gdf.interleave_columns() expect = pd.Series(np.vstack(pdf.to_numpy()).reshape((-1,))).astype( dtype ) assert_eq(expect, got) @pytest.mark.parametrize("num_cols", [1, 2, 10]) @pytest.mark.parametrize("num_rows", [1, 2, 1000]) @pytest.mark.parametrize("count", [1, 2, 10]) @pytest.mark.parametrize("dtype", ALL_TYPES) @pytest.mark.parametrize("nulls", ["none", "some"]) def test_tile(nulls, num_cols, num_rows, dtype, count): if dtype not in ["float32", "float64"] and nulls in ["some"]: pytest.skip(reason="nulls not supported in dtype: " + dtype) pdf = 
pd.DataFrame(dtype=dtype)
    for i in range(num_cols):
        colname = str(i)
        data = pd.Series(np.random.randint(num_cols, 26, num_rows)).astype(
            dtype
        )
        if nulls == "some":
            idx = np.random.choice(
                num_rows, size=int(num_rows / 2), replace=False
            )
            data[idx] = np.nan
        pdf[colname] = data

    gdf = cudf.from_pandas(pdf)

    got = gdf.tile(count)
    expect = pd.DataFrame(pd.concat([pdf] * count))

    assert_eq(expect, got)


def _prepare_merge_sorted_test(
    size,
    nparts,
    keys,
    add_null=False,
    na_position="last",
    ascending=True,
    series=False,
    index=False,
):
    if index:
        df = (
            cudf.datasets.timeseries()[:size]
            .reset_index(drop=False)
            .set_index(keys, drop=True)
        )
    else:
        df = cudf.datasets.timeseries()[:size].reset_index(drop=False)
    if add_null:
        df.iloc[1, df.columns.get_loc(keys[0])] = None
    chunk = int(size / nparts)
    indices = [i * chunk for i in range(0, nparts)] + [size]
    if index:
        dfs = [
            df.iloc[indices[i] : indices[i + 1]]
            .copy()
            .sort_index(ascending=ascending)
            for i in range(nparts)
        ]
    elif series:
        df = df[keys[0]]
        dfs = [
            df.iloc[indices[i] : indices[i + 1]]
            .copy()
            .sort_values(na_position=na_position, ascending=ascending)
            for i in range(nparts)
        ]
    else:
        dfs = [
            df.iloc[indices[i] : indices[i + 1]]
            .copy()
            .sort_values(keys, na_position=na_position, ascending=ascending)
            for i in range(nparts)
        ]
    return df, dfs


@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("keys", [None, ["id"], ["name", "timestamp"]])
@pytest.mark.parametrize("nparts", [2, 10])
def test_df_merge_sorted(nparts, keys, na_position, ascending):
    size = 100
    keys_1 = keys or ["timestamp"]
    # Null values NOT currently supported with Categorical data
    # or when `ascending=False`
    add_null = keys_1[0] not in ("name",)  # note: tuple, not a bare string
    df, dfs = _prepare_merge_sorted_test(
        size,
        nparts,
        keys_1,
        add_null=add_null,
        na_position=na_position,
        ascending=ascending,
    )

    expect = df.sort_values(
        keys_1, na_position=na_position, ascending=ascending
    )
    result = cudf.core.reshape._merge_sorted(
        dfs, keys=keys, na_position=na_position, ascending=ascending
    )
    if keys:
        expect = expect[keys]
        result = result[keys]

    assert expect.index.dtype == result.index.dtype
    assert_eq(expect.reset_index(drop=True), result.reset_index(drop=True))


@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("index", ["id", "x"])
@pytest.mark.parametrize("nparts", [2, 10])
def test_df_merge_sorted_index(nparts, index, ascending):
    size = 100
    df, dfs = _prepare_merge_sorted_test(
        size, nparts, index, ascending=ascending, index=True
    )

    expect = df.sort_index(ascending=ascending)
    result = cudf.core.reshape._merge_sorted(
        dfs, by_index=True, ascending=ascending
    )

    assert_eq(expect.index, result.index)


@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("keys", [None, ["name", "timestamp"]])
def test_df_merge_sorted_ignore_index(keys, na_position, ascending):
    size = 100
    nparts = 3
    keys_1 = keys or ["timestamp"]
    # Null values NOT currently supported with Categorical data
    # or when `ascending=False`
    add_null = keys_1[0] not in ("name",)  # note: tuple, not a bare string
    df, dfs = _prepare_merge_sorted_test(
        size,
        nparts,
        keys_1,
        add_null=add_null,
        na_position=na_position,
        ascending=ascending,
    )

    expect = df.sort_values(
        keys_1, na_position=na_position, ascending=ascending
    )
    result = cudf.core.reshape._merge_sorted(
        dfs,
        keys=keys,
        na_position=na_position,
        ascending=ascending,
        ignore_index=True,
    )
    if keys:
        expect = expect[keys]
        result = result[keys]
assert_eq(expect.reset_index(drop=True), result) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("na_position", ["first", "last"]) @pytest.mark.parametrize("key", ["id", "name", "timestamp"]) @pytest.mark.parametrize("nparts", [2, 10]) def test_series_merge_sorted(nparts, key, na_position, ascending): size = 100 df, dfs = _prepare_merge_sorted_test( size, nparts, [key], na_position=na_position, ascending=ascending, series=True, ) expect = df.sort_values(na_position=na_position, ascending=ascending) result = cudf.core.reshape._merge_sorted( dfs, na_position=na_position, ascending=ascending ) assert_eq(expect.reset_index(drop=True), result.reset_index(drop=True)) @pytest.mark.parametrize( "index, column, data", [ ([], [], []), ([0], [0], [0]), ([0, 0], [0, 1], [1, 2.0]), ([0, 1], [0, 0], [1, 2.0]), ([0, 1], [0, 1], [1, 2.0]), (["a", "a", "b", "b"], ["c", "d", "c", "d"], [1, 2, 3, 4]), ( ["a", "a", "b", "b", "a"], ["c", "d", "c", "d", "e"], [1, 2, 3, 4, 5], ), ], ) def test_pivot_simple(index, column, data): pdf = pd.DataFrame({"index": index, "column": column, "data": data}) gdf = cudf.from_pandas(pdf) # In pandas 2.0 this will be a failure because pandas will require all of # these as keyword arguments. Matching that check in cudf is a bit # cumbersome and not worth the effort to match the warning, so this code # just catches pandas's warning (rather than updating the signature) so # that when it starts failing we know to update our impl of pivot. with pytest.warns(FutureWarning): expect = pdf.pivot("index", "column") got = gdf.pivot("index", "column") check_index_and_columns = expect.shape != (0, 0) assert_eq( expect, got, check_dtype=False, check_index_type=check_index_and_columns, check_column_type=check_index_and_columns, ) def test_pivot_multi_values(): # from Pandas docs: # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.pivot.html pdf = pd.DataFrame( { "foo": ["one", "one", "one", "two", "two", "two"], "bar": ["A", "B", "C", "A", "B", "C"], "baz": [1, 2, 3, 4, 5, 6], "zoo": ["x", "y", "z", "q", "w", "t"], } ) gdf = cudf.from_pandas(pdf) assert_eq( pdf.pivot(index="foo", columns="bar", values=["baz", "zoo"]), gdf.pivot(index="foo", columns="bar", values=["baz", "zoo"]), check_dtype=False, ) @pytest.mark.parametrize( "values", ["z", "z123", ["z123"], ["z", "z123", "123z"]] ) def test_pivot_values(values): data = [ ["A", "a", 0, 0, 0], ["A", "b", 1, 1, 1], ["A", "c", 2, 2, 2], ["B", "a", 0, 0, 0], ["B", "b", 1, 1, 1], ["B", "c", 2, 2, 2], ["C", "a", 0, 0, 0], ["C", "b", 1, 1, 1], ["C", "c", 2, 2, 2], ] columns = ["x", "y", "z", "z123", "123z"] pdf = pd.DataFrame(data, columns=columns) cdf = cudf.DataFrame(data, columns=columns) expected = pd.pivot(pdf, index="x", columns="y", values=values) actual = cudf.pivot(cdf, index="x", columns="y", values=values) assert_eq( expected, actual, check_dtype=False, ) @pytest.mark.parametrize( "level", [ 0, pytest.param( 1, marks=pytest_xfail( reason="Categorical column indexes not supported" ), ), 2, "foo", pytest.param( "bar", marks=pytest_xfail( reason="Categorical column indexes not supported" ), ), "baz", [], pytest.param( [0, 1], marks=pytest_xfail( reason="Categorical column indexes not supported" ), ), ["foo"], pytest.param( ["foo", "bar"], marks=pytest_xfail( reason="Categorical column indexes not supported" ), ), pytest.param( [0, 1, 2], marks=pytest_xfail(reason="Pandas behaviour unclear"), ), pytest.param( ["foo", "bar", "baz"], marks=pytest_xfail(reason="Pandas behaviour unclear"), 
), ], ) def test_unstack_multiindex(level): pdf = pd.DataFrame( { "foo": ["one", "one", "one", "two", "two", "two"], "bar": pd.Categorical(["A", "B", "C", "A", "B", "C"]), "baz": [1, 2, 3, 4, 5, 6], "zoo": ["x", "y", "z", "q", "w", "t"], } ).set_index(["foo", "bar", "baz"]) gdf = cudf.from_pandas(pdf) assert_eq( pdf.unstack(level=level), gdf.unstack(level=level), check_dtype=False, ) @pytest.mark.parametrize( "data", [{"A": [1.0, 2.0, 3.0, 4.0, 5.0], "B": [11.0, 12.0, 13.0, 14.0, 15.0]}], ) @pytest.mark.parametrize( "index", [ pd.Index(range(0, 5), name=None), pd.Index(range(0, 5), name="row_index"), pytest.param( pd.CategoricalIndex(["d", "e", "f", "g", "h"]), marks=pytest_xfail( reason="Categorical column indexes not supported" ), ), ], ) @pytest.mark.parametrize( "col_idx", [ pd.Index(["a", "b"], name=None), pd.Index(["a", "b"], name="col_index"), pd.MultiIndex.from_tuples([("c", 1), ("c", 2)], names=[None, None]), pd.MultiIndex.from_tuples( [("c", 1), ("c", 2)], names=["col_index1", "col_index2"] ), ], ) def test_unstack_index(data, index, col_idx): pdf = pd.DataFrame(data) gdf = cudf.from_pandas(pdf) pdf.index = index pdf.columns = col_idx gdf.index = cudf.from_pandas(index) gdf.columns = cudf.from_pandas(col_idx) assert_eq(pdf.unstack(), gdf.unstack()) def test_unstack_index_invalid(): gdf = cudf.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}) with pytest.raises( ValueError, match=re.escape( "Calling unstack() on single index dataframe with " "different column datatype is not supported." ), ): gdf.unstack() def test_pivot_duplicate_error(): gdf = cudf.DataFrame( {"a": [0, 1, 2, 2], "b": [1, 2, 3, 3], "d": [1, 2, 3, 4]} ) with pytest.raises(ValueError): gdf.pivot(index="a", columns="b") with pytest.raises(ValueError): gdf.pivot(index="b", columns="a") @pytest.mark.parametrize( "data", [ { "A": ["one", "one", "two", "three"] * 6, "B": ["A", "B", "C"] * 8, "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4, "D": np.random.randn(24), "E": np.random.randn(24), } ], ) @pytest.mark.parametrize( "aggfunc", ["mean", "count", {"D": "sum", "E": "count"}] ) @pytest.mark.parametrize("fill_value", [0]) def test_pivot_table_simple(data, aggfunc, fill_value): pdf = pd.DataFrame(data) expected = pd.pivot_table( pdf, values=["D", "E"], index=["A", "B"], columns=["C"], aggfunc=aggfunc, fill_value=fill_value, ) cdf = cudf.DataFrame(data) actual = cudf.pivot_table( cdf, values=["D", "E"], index=["A", "B"], columns=["C"], aggfunc=aggfunc, fill_value=fill_value, ) assert_eq(expected, actual, check_dtype=False) @pytest.mark.parametrize( "data", [ { "A": ["one", "one", "two", "three"] * 6, "B": ["A", "B", "C"] * 8, "C": ["foo", "foo", "foo", "bar", "bar", "bar"] * 4, "D": np.random.randn(24), "E": np.random.randn(24), } ], ) @pytest.mark.parametrize( "aggfunc", ["mean", "count", {"D": "sum", "E": "count"}] ) @pytest.mark.parametrize("fill_value", [0]) def test_dataframe_pivot_table_simple(data, aggfunc, fill_value): pdf = pd.DataFrame(data) expected = pdf.pivot_table( values=["D", "E"], index=["A", "B"], columns=["C"], aggfunc=aggfunc, fill_value=fill_value, ) cdf = cudf.DataFrame(data) actual = cdf.pivot_table( values=["D", "E"], index=["A", "B"], columns=["C"], aggfunc=aggfunc, fill_value=fill_value, ) assert_eq(expected, actual, check_dtype=False) def test_crosstab_simple(): a = np.array( [ "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar", "foo", "foo", "foo", ], dtype=object, ) b = np.array( [ "one", "one", "one", "two", "one", "one", "one", "two", "two", "two", "one", ], dtype=object, ) c = 
np.array( [ "dull", "dull", "shiny", "dull", "dull", "shiny", "shiny", "dull", "shiny", "shiny", "shiny", ], dtype=object, ) expected = pd.crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"]) actual = cudf.crosstab(a, [b, c], rownames=["a"], colnames=["b", "c"]) assert_eq(expected, actual, check_dtype=False)
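# For reference, the frequency table the crosstab above produces (rows
# labelled by `a`, a two-level column index from `b` and `c`) should be:
#
# b    one        two
# c    dull shiny dull shiny
# a
# bar     1     2    1     0
# foo     2     2    1     2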
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_series.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import decimal import hashlib import operator import re from collections import OrderedDict, defaultdict from string import ascii_letters, digits import cupy as cp import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf from cudf.core._compat import PANDAS_LT_140 from cudf.testing._utils import ( NUMERIC_TYPES, SERIES_OR_INDEX_NAMES, TIMEDELTA_TYPES, _create_cudf_series_float64_default, _create_pandas_series_float64_default, assert_eq, assert_exceptions_equal, expect_warning_if, gen_rand, ) def _series_na_data(): return [ pd.Series([0, 1, 2, np.nan, 4, None, 6]), pd.Series( [0, 1, 2, np.nan, 4, None, 6], index=["q", "w", "e", "r", "t", "y", "u"], name="a", ), pd.Series([0, 1, 2, 3, 4]), pd.Series(["a", "b", "u", "h", "d"]), pd.Series([None, None, np.nan, None, np.inf, -np.inf]), pd.Series([], dtype="float64"), pd.Series( [pd.NaT, pd.Timestamp("1939-05-27"), pd.Timestamp("1940-04-25")] ), pd.Series([np.nan]), pd.Series([None]), pd.Series(["a", "b", "", "c", None, "e"]), ] @pytest.mark.parametrize( "data", [ {"a": 1, "b": 2, "c": 24, "d": 1010}, {"a": 1}, {1: "a", 2: "b", 24: "c", 1010: "d"}, {1: "a"}, ], ) def test_series_init_dict(data): pandas_series = pd.Series(data) cudf_series = cudf.Series(data) assert_eq(pandas_series, cudf_series) @pytest.mark.parametrize( "data", [ { "a": [1, 2, 3], "b": [2, 3, 5], "c": [24, 12212, 22233], "d": [1010, 101010, 1111], }, {"a": [1]}, ], ) def test_series_init_dict_lists(data): assert_eq(pd.Series(data), cudf.Series(data)) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4], [1.0, 12.221, 12.34, 13.324, 324.3242], [-10, -1111, 100, 11, 133], ], ) @pytest.mark.parametrize( "others", [ [10, 11, 12, 13], [0.1, 0.002, 324.2332, 0.2342], [-10, -1111, 100, 11, 133], ], ) @pytest.mark.parametrize("ignore_index", [True, False]) def test_series_append_basic(data, others, ignore_index): psr = pd.Series(data) gsr = cudf.Series(data) other_ps = pd.Series(others) other_gs = cudf.Series(others) with pytest.warns(FutureWarning): expected = psr.append(other_ps, ignore_index=ignore_index) with pytest.warns(FutureWarning): actual = gsr.append(other_gs, ignore_index=ignore_index) assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [ "abc", "def", "this is a string", "this is another string", "a", "b", "c", ], ["a"], ], ) @pytest.mark.parametrize( "others", [ [ "abc", "def", "this is a string", "this is another string", "a", "b", "c", ], ["a"], ["1", "2", "3", "4", "5"], ["+", "-", "!", "_", "="], ], ) @pytest.mark.parametrize("ignore_index", [True, False]) def test_series_append_basic_str(data, others, ignore_index): psr = pd.Series(data) gsr = cudf.Series(data) other_ps = pd.Series(others) other_gs = cudf.Series(others) with pytest.warns(FutureWarning): expected = psr.append(other_ps, ignore_index=ignore_index) with pytest.warns(FutureWarning): actual = gsr.append(other_gs, ignore_index=ignore_index) assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ pd.Series( [ "abc", "def", "this is a string", "this is another string", "a", "b", "c", ], index=[10, 20, 30, 40, 50, 60, 70], ), pd.Series(["a"], index=[2]), ], ) @pytest.mark.parametrize( "others", [ pd.Series( [ "abc", "def", "this is a string", "this is another string", "a", "b", "c", ], index=[10, 20, 30, 40, 50, 60, 70], ), pd.Series(["a"], index=[133]), pd.Series(["1", "2", "3", "4", "5"], index=[-10, 22, 33, 44, 49]), pd.Series(["+", "-", "!", "_", "="], index=[11, 22, 33, 44, 2]), ], ) 
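# Series.append is deprecated upstream in favor of concat, so the append
# tests below wrap both the pandas and cudf calls in
# pytest.warns(FutureWarning).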
@pytest.mark.parametrize("ignore_index", [True, False]) def test_series_append_series_with_index(data, others, ignore_index): psr = pd.Series(data) gsr = cudf.Series(data) other_ps = others other_gs = cudf.from_pandas(others) with pytest.warns(FutureWarning): expected = psr.append(other_ps, ignore_index=ignore_index) with pytest.warns(FutureWarning): actual = gsr.append(other_gs, ignore_index=ignore_index) assert_eq(expected, actual) def test_series_append_error_mixed_types(): gsr = cudf.Series([1, 2, 3, 4]) other = cudf.Series(["a", "b", "c", "d"]) with pytest.raises( TypeError, match="cudf does not support mixed types, please type-cast " "both series to same dtypes.", ): with pytest.warns(FutureWarning): gsr.append(other) with pytest.raises( TypeError, match="cudf does not support mixed types, please type-cast " "both series to same dtypes.", ): with pytest.warns(FutureWarning): gsr.append([gsr, other, gsr, other]) @pytest.mark.parametrize( "data", [ pd.Series([1, 2, 3, 4], index=["a", "b", "c", "d"]), pd.Series( [1.0, 12.221, 12.34, 13.324, 324.3242], index=[ "float one", "float two", "float three", "float four", "float five", ], ), pd.Series( [-10, -1111, 100, 11, 133], index=["one", "two", "three", "four", "five"], ), ], ) @pytest.mark.parametrize( "others", [ [ pd.Series([10, 11, 12, 13], index=["a", "b", "c", "d"]), pd.Series([12, 14, 15, 27], index=["d", "e", "z", "x"]), ], [ pd.Series([10, 11, 12, 13], index=["a", "b", "c", "d"]), pd.Series([12, 14, 15, 27], index=["d", "e", "z", "x"]), ] * 25, [ pd.Series( [0.1, 0.002, 324.2332, 0.2342], index=["-", "+", "%", "#"] ), pd.Series([12, 14, 15, 27], index=["d", "e", "z", "x"]), ] * 46, [ pd.Series( [-10, -1111, 100, 11, 133], index=["aa", "vv", "bb", "dd", "ll"], ) ], ], ) @pytest.mark.parametrize("ignore_index", [True, False]) def test_series_append_list_series_with_index(data, others, ignore_index): psr = pd.Series(data) gsr = cudf.Series(data) other_ps = others other_gs = [cudf.from_pandas(obj) for obj in others] with pytest.warns(FutureWarning): expected = psr.append(other_ps, ignore_index=ignore_index) with pytest.warns(FutureWarning): actual = gsr.append(other_gs, ignore_index=ignore_index) assert_eq(expected, actual) def test_series_append_existing_buffers(): a1 = np.arange(10, dtype=np.float64) gs = cudf.Series(a1) # Add new buffer a2 = cudf.Series(np.arange(5)) with pytest.warns(FutureWarning): gs = gs.append(a2) assert len(gs) == 15 np.testing.assert_equal(gs.to_numpy(), np.hstack([a1, a2.to_numpy()])) # Ensure appending to previous buffer a3 = cudf.Series(np.arange(3)) with pytest.warns(FutureWarning): gs = gs.append(a3) assert len(gs) == 18 a4 = np.hstack([a1, a2.to_numpy(), a3.to_numpy()]) np.testing.assert_equal(gs.to_numpy(), a4) # Appending different dtype a5 = cudf.Series(np.array([1, 2, 3], dtype=np.int32)) a6 = cudf.Series(np.array([4.5, 5.5, 6.5], dtype=np.float64)) with pytest.warns(FutureWarning): gs = a5.append(a6) np.testing.assert_equal( gs.to_numpy(), np.hstack([a5.to_numpy(), a6.to_numpy()]) ) with pytest.warns(FutureWarning): gs = cudf.Series(a6).append(a5) np.testing.assert_equal( gs.to_numpy(), np.hstack([a6.to_numpy(), a5.to_numpy()]) ) def test_series_column_iter_error(): gs = cudf.Series([1, 2, 3]) with pytest.raises( TypeError, match=re.escape( f"{gs.__class__.__name__} object is not iterable. " f"Consider using `.to_arrow()`, `.to_pandas()` or `.values_host` " f"if you wish to iterate over the values." 
), ): iter(gs) with pytest.raises( TypeError, match=re.escape( f"{gs.__class__.__name__} object is not iterable. " f"Consider using `.to_arrow()`, `.to_pandas()` or `.values_host` " f"if you wish to iterate over the values." ), ): gs.items() with pytest.raises( TypeError, match=re.escape( f"{gs.__class__.__name__} object is not iterable. " f"Consider using `.to_arrow()`, `.to_pandas()` or `.values_host` " f"if you wish to iterate over the values." ), ): gs.iteritems() with pytest.raises(TypeError): iter(gs._column) @pytest.mark.parametrize( "data", [ [1.0, 2.0, None, 4.0, 5.0], ["a", "b", "c", "d", "e"], ["a", "b", None, "d", "e"], [None, None, None, None, None], np.array(["1991-11-20", "2004-12-04"], dtype=np.datetime64), np.array(["1991-11-20", None], dtype=np.datetime64), np.array( ["1991-11-20 05:15:00", "2004-12-04 10:00:00"], dtype=np.datetime64 ), np.array(["1991-11-20 05:15:00", None], dtype=np.datetime64), ], ) def test_series_tolist(data): psr = pd.Series(data) gsr = cudf.from_pandas(psr) with pytest.raises( TypeError, match=re.escape( r"cuDF does not support conversion to host memory " r"via the `tolist()` method. Consider using " r"`.to_arrow().to_pylist()` to construct a Python list." ), ): gsr.tolist() @pytest.mark.parametrize( "data", [[], [None, None], ["a"], ["a", "b", "c"] * 500, [1.0, 2.0, 0.3] * 57], ) def test_series_size(data): psr = _create_pandas_series_float64_default(data) gsr = _create_cudf_series_float64_default(data) assert_eq(psr.size, gsr.size) @pytest.mark.parametrize("dtype", NUMERIC_TYPES) def test_series_describe_numeric(dtype): ps = pd.Series([0, 1, 2, 3, 1, 2, 3], dtype=dtype) gs = cudf.from_pandas(ps) with pytest.warns(FutureWarning): actual = gs.describe() expected = ps.describe() assert_eq(expected, actual, check_dtype=True) @pytest.mark.parametrize("dtype", ["datetime64[ns]"]) def test_series_describe_datetime(dtype): # Note that other datetime units are not tested because pandas does not # support them. When specified coarser units, cuDF datetime columns cannot # represent fractional time for quantiles of the column, which may require # interpolation, this differs from pandas which always stay in [ns] unit. gs = cudf.Series([0, 1, 2, 3, 1, 2, 3], dtype=dtype) ps = gs.to_pandas() # Treating datetimes as categoricals is deprecated in pandas and will # be removed in future. Future behavior is treating datetime as numeric. 
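    # datetime_is_numeric=True opts pandas into the future numeric
    # treatment; cudf matches that output but still warns for parity.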
expected = ps.describe(datetime_is_numeric=True) with pytest.warns(FutureWarning): actual = gs.describe() assert_eq(expected.astype("str"), actual) @pytest.mark.parametrize("dtype", TIMEDELTA_TYPES) def test_series_describe_timedelta(dtype): ps = pd.Series([0, 1, 2, 3, 1, 2, 3], dtype=dtype) gs = cudf.from_pandas(ps) expected = ps.describe() with pytest.warns(FutureWarning): actual = gs.describe() assert_eq(actual, expected.astype("str")) @pytest.mark.parametrize( "ps", [ pd.Series(["a", "b", "c", "d", "e", "a"]), pd.Series([True, False, True, True, False]), pd.Series([], dtype="str"), pd.Series(["a", "b", "c", "a"], dtype="category"), pd.Series(["d", "e", "f"], dtype="category"), pd.Series(pd.Categorical(["d", "e", "f"], categories=["f", "e", "d"])), pd.Series( pd.Categorical( ["d", "e", "f"], categories=["f", "e", "d"], ordered=True ) ), ], ) def test_series_describe_other_types(ps): gs = cudf.from_pandas(ps) expected = ps.describe() with pytest.warns(FutureWarning): actual = gs.describe() if len(ps) == 0: assert_eq(expected.fillna("a").astype("str"), actual.fillna("a")) else: assert_eq(expected.astype("str"), actual) @pytest.mark.parametrize( "data", [ [1, 2, 3, 2, 1], [1, 2, None, 3, 1, 1], [], ["a", "b", "c", None, "z", "a"], ], ) @pytest.mark.parametrize("na_sentinel", [99999, 11, -1, 0]) def test_series_factorize(data, na_sentinel): gsr = _create_cudf_series_float64_default(data) psr = gsr.to_pandas() with pytest.warns(FutureWarning): expected_labels, expected_cats = psr.factorize(na_sentinel=na_sentinel) with pytest.warns(FutureWarning): actual_labels, actual_cats = gsr.factorize(na_sentinel=na_sentinel) assert_eq(expected_labels, actual_labels.get()) assert_eq(expected_cats.values, actual_cats.to_pandas().values) @pytest.mark.parametrize( "data", [ [1, 2, 3, 2, 1], [1, 2, None, 3, 1, 1], [], ["a", "b", "c", None, "z", "a"], ], ) @pytest.mark.parametrize("use_na_sentinel", [True, False]) def test_series_factorize_use_na_sentinel(data, use_na_sentinel): gsr = _create_cudf_series_float64_default(data) psr = gsr.to_pandas(nullable=True) expected_labels, expected_cats = psr.factorize( use_na_sentinel=use_na_sentinel, sort=True ) actual_labels, actual_cats = gsr.factorize( use_na_sentinel=use_na_sentinel, sort=True ) assert_eq(expected_labels, actual_labels.get()) assert_eq(expected_cats, actual_cats.to_pandas(nullable=True)) @pytest.mark.parametrize( "data", [ [1, 2, 3, 2, 1], [1, 2, None, 3, 1, 1], [], ["a", "b", "c", None, "z", "a"], ], ) @pytest.mark.parametrize("sort", [True, False]) def test_series_factorize_sort(data, sort): gsr = _create_cudf_series_float64_default(data) psr = gsr.to_pandas(nullable=True) expected_labels, expected_cats = psr.factorize(sort=sort) actual_labels, actual_cats = gsr.factorize(sort=sort) assert_eq(expected_labels, actual_labels.get()) assert_eq(expected_cats, actual_cats.to_pandas(nullable=True)) @pytest.mark.parametrize( "data", [ pd.Series([], dtype="datetime64[ns]"), pd.Series(pd.date_range("2010-01-01", "2010-02-01")), pd.Series([None, None], dtype="datetime64[ns]"), ], ) @pytest.mark.parametrize("dropna", [True, False]) @pytest.mark.parametrize("normalize", [True, False]) @pytest.mark.parametrize("nulls", ["none", "some"]) def test_series_datetime_value_counts(data, nulls, normalize, dropna): psr = data.copy() if len(data) > 0: if nulls == "one": p = np.random.randint(0, len(data)) psr[p] = None elif nulls == "some": p = np.random.randint(0, len(data), 2) psr[p] = None gsr = cudf.from_pandas(psr) expected = psr.value_counts(dropna=dropna, 
normalize=normalize) got = gsr.value_counts(dropna=dropna, normalize=normalize) assert_eq(expected.sort_index(), got.sort_index(), check_dtype=False) assert_eq( expected.reset_index(drop=True), got.reset_index(drop=True), check_dtype=False, check_index_type=True, ) @pytest.mark.parametrize("dropna", [True, False]) @pytest.mark.parametrize("normalize", [True, False]) @pytest.mark.parametrize("num_elements", [10, 100, 1000]) def test_categorical_value_counts(dropna, normalize, num_elements): # create categorical series np.random.seed(12) pd_cat = pd.Categorical( pd.Series( np.random.choice(list(ascii_letters + digits), num_elements), dtype="category", ) ) # gdf gdf = cudf.DataFrame() gdf["a"] = cudf.Series.from_categorical(pd_cat) gdf_value_counts = gdf["a"].value_counts( dropna=dropna, normalize=normalize ) # pandas pdf = pd.DataFrame() pdf["a"] = pd_cat pdf_value_counts = pdf["a"].value_counts( dropna=dropna, normalize=normalize ) # verify assert_eq( pdf_value_counts.sort_index(), gdf_value_counts.sort_index(), check_dtype=False, check_index_type=True, ) assert_eq( pdf_value_counts.reset_index(drop=True), gdf_value_counts.reset_index(drop=True), check_dtype=False, check_index_type=True, ) @pytest.mark.parametrize("dropna", [True, False]) @pytest.mark.parametrize("normalize", [True, False]) def test_series_value_counts(dropna, normalize): for size in [10**x for x in range(5)]: arr = np.random.randint(low=-1, high=10, size=size) mask = arr != -1 sr = cudf.Series.from_masked_array( arr, cudf.Series(mask)._column.as_mask() ) sr.name = "col" expect = ( sr.to_pandas() .value_counts(dropna=dropna, normalize=normalize) .sort_index() ) got = sr.value_counts(dropna=dropna, normalize=normalize).sort_index() assert_eq(expect, got, check_dtype=True, check_index_type=False) @pytest.mark.parametrize("bins", [1, 2, 3]) def test_series_value_counts_bins(bins): psr = pd.Series([1.0, 2.0, 2.0, 3.0, 3.0, 3.0]) gsr = cudf.from_pandas(psr) expected = psr.value_counts(bins=bins) got = gsr.value_counts(bins=bins) assert_eq(expected.sort_index(), got.sort_index(), check_dtype=True) @pytest.mark.parametrize("bins", [1, 2, 3]) @pytest.mark.parametrize("dropna", [True, False]) def test_series_value_counts_bins_dropna(bins, dropna): psr = pd.Series([1.0, 2.0, 2.0, 3.0, 3.0, 3.0, np.nan]) gsr = cudf.from_pandas(psr) expected = psr.value_counts(bins=bins, dropna=dropna) got = gsr.value_counts(bins=bins, dropna=dropna) assert_eq(expected.sort_index(), got.sort_index(), check_dtype=True) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("dropna", [True, False]) @pytest.mark.parametrize("normalize", [True, False]) def test_series_value_counts_optional_arguments(ascending, dropna, normalize): psr = pd.Series([1.0, 2.0, 2.0, 3.0, 3.0, 3.0, None]) gsr = cudf.from_pandas(psr) expected = psr.value_counts( ascending=ascending, dropna=dropna, normalize=normalize ) got = gsr.value_counts( ascending=ascending, dropna=dropna, normalize=normalize ) assert_eq(expected.sort_index(), got.sort_index(), check_dtype=True) assert_eq( expected.reset_index(drop=True), got.reset_index(drop=True), check_dtype=True, ) @pytest.mark.parametrize( "gs", [ cudf.Series([1, 2, 3]), cudf.Series([None]), cudf.Series([4]), cudf.Series([2, 3, -1, 0, 1], name="test name"), cudf.Series( [1, 2, 3, None, 2, 1], index=["a", "v", "d", "e", "f", "g"] ), cudf.Series([1, 2, 3, None, 2, 1, None], name="abc"), cudf.Series(["ab", "bc", "ab", None, "bc", None, None]), cudf.Series([None, None, None, None, None], dtype="str"), 
cudf.Series([None, None, None, None, None]), cudf.Series( [ 123213, 23123, 123123, 12213123, 12213123, 12213123, 23123, 2312323123, None, None, ], dtype="timedelta64[ns]", ), cudf.Series( [ None, 1, 2, 3242434, 3233243, 1, 2, 1023, None, 12213123, None, 2312323123, None, None, ], dtype="datetime64[ns]", ), cudf.Series(name="empty series", dtype="float64"), cudf.Series(["a", "b", "c", " ", "a", "b", "z"], dtype="category"), ], ) @pytest.mark.parametrize("dropna", [True, False]) def test_series_mode(gs, dropna): ps = gs.to_pandas() expected = ps.mode(dropna=dropna) actual = gs.mode(dropna=dropna) assert_eq(expected, actual, check_dtype=False) @pytest.mark.parametrize( "arr", [ np.random.normal(-100, 100, 1000), np.random.randint(-50, 50, 1000), np.zeros(100), np.repeat([-0.6459412758761901], 100), np.repeat(np.nan, 100), np.array([1.123, 2.343, np.nan, 0.0]), np.arange(-100.5, 101.5, 1), ], ) @pytest.mark.parametrize("decimals", [-5, -3, -1, 0, 1, 4, 12, np.int8(1)]) def test_series_round(arr, decimals): pser = pd.Series(arr) ser = cudf.Series(arr) result = ser.round(decimals) expected = pser.round(decimals) assert_eq(result, expected) # with nulls, maintaining existing null mask arr = arr.astype("float64") # for pandas nulls arr.ravel()[ np.random.choice(arr.shape[0], arr.shape[0] // 2, replace=False) ] = np.nan pser = pd.Series(arr) ser = cudf.Series(arr) result = ser.round(decimals) expected = pser.round(decimals) assert_eq(result, expected) def test_series_round_half_up(): s = cudf.Series([0.0, 1.0, 1.2, 1.7, 0.5, 1.5, 2.5, None]) expect = cudf.Series([0.0, 1.0, 1.0, 2.0, 1.0, 2.0, 3.0, None]) got = s.round(how="half_up") assert_eq(expect, got) @pytest.mark.parametrize( "series", [ cudf.Series([1.0, None, np.nan, 4.0], nan_as_null=False), cudf.Series([1.24430, None, np.nan, 4.423530], nan_as_null=False), cudf.Series([1.24430, np.nan, 4.423530], nan_as_null=False), cudf.Series([-1.24430, np.nan, -4.423530], nan_as_null=False), cudf.Series(np.repeat(np.nan, 100)), ], ) @pytest.mark.parametrize("decimal", [0, 1, 2, 3]) def test_round_nan_as_null_false(series, decimal): pser = series.to_pandas() result = series.round(decimal) expected = pser.round(decimal) assert_eq(result, expected, atol=1e-10) @pytest.mark.parametrize("ps", _series_na_data()) @pytest.mark.parametrize("nan_as_null", [True, False, None]) def test_series_isnull_isna(ps, nan_as_null): gs = cudf.Series.from_pandas(ps, nan_as_null=nan_as_null) assert_eq(ps.isnull(), gs.isnull()) assert_eq(ps.isna(), gs.isna()) @pytest.mark.parametrize("ps", _series_na_data()) @pytest.mark.parametrize("nan_as_null", [True, False, None]) def test_series_notnull_notna(ps, nan_as_null): gs = cudf.Series.from_pandas(ps, nan_as_null=nan_as_null) assert_eq(ps.notnull(), gs.notnull()) assert_eq(ps.notna(), gs.notna()) @pytest.mark.parametrize( "sr1", [pd.Series([10, 11, 12], index=["a", "b", "z"]), pd.Series(["a"])] ) @pytest.mark.parametrize( "sr2", [pd.Series([], dtype="float64"), pd.Series(["a", "a", "c", "z", "A"])], ) @pytest.mark.parametrize( "op", [ operator.eq, operator.ne, operator.lt, operator.gt, operator.le, operator.ge, ], ) def test_series_error_equality(sr1, sr2, op): gsr1 = cudf.from_pandas(sr1) gsr2 = cudf.from_pandas(sr2) assert_exceptions_equal(op, op, ([sr1, sr2],), ([gsr1, gsr2],)) def test_series_memory_usage(): sr = cudf.Series([1, 2, 3, 4], dtype="int64") assert sr.memory_usage() == 32 sliced_sr = sr[2:] assert sliced_sr.memory_usage() == 16 sliced_sr[3] = None assert sliced_sr.memory_usage() == 80 sr = cudf.Series(["hello 
world", "rapids ai", "abc", "z"]) assert sr.memory_usage() == 44 assert sr[3:].memory_usage() == 9 # z assert sr[:1].memory_usage() == 19 # hello world @pytest.mark.parametrize( "sr,expected_psr", [ ( cudf.Series([1, 2, None, 3], dtype="uint8"), pd.Series([1, 2, None, 3], dtype=pd.UInt8Dtype()), ), ( cudf.Series([23, None, None, 32], dtype="uint16"), pd.Series([23, None, None, 32], dtype=pd.UInt16Dtype()), ), ( cudf.Series([None, 123, None, 1], dtype="uint32"), pd.Series([None, 123, None, 1], dtype=pd.UInt32Dtype()), ), ( cudf.Series([234, 2323, 23432, None, None, 224], dtype="uint64"), pd.Series( [234, 2323, 23432, None, None, 224], dtype=pd.UInt64Dtype() ), ), ( cudf.Series([-10, 1, None, -1, None, 3], dtype="int8"), pd.Series([-10, 1, None, -1, None, 3], dtype=pd.Int8Dtype()), ), ( cudf.Series([111, None, 222, None, 13], dtype="int16"), pd.Series([111, None, 222, None, 13], dtype=pd.Int16Dtype()), ), ( cudf.Series([11, None, 22, 33, None, 2, None, 3], dtype="int32"), pd.Series( [11, None, 22, 33, None, 2, None, 3], dtype=pd.Int32Dtype() ), ), ( cudf.Series( [32431, None, None, 32322, 0, 10, -32324, None], dtype="int64" ), pd.Series( [32431, None, None, 32322, 0, 10, -32324, None], dtype=pd.Int64Dtype(), ), ), ( cudf.Series( [True, None, False, None, False, True, True, False], dtype="bool_", ), pd.Series( [True, None, False, None, False, True, True, False], dtype=pd.BooleanDtype(), ), ), ( cudf.Series( [ "abc", "a", None, "hello world", "foo buzz", "", None, "rapids ai", ], dtype="object", ), pd.Series( [ "abc", "a", None, "hello world", "foo buzz", "", None, "rapids ai", ], dtype=pd.StringDtype(), ), ), ( cudf.Series( [1, 2, None, 10.2, None], dtype="float32", ), pd.Series( [1, 2, None, 10.2, None], dtype=pd.Float32Dtype(), ), ), ], ) def test_series_to_pandas_nullable_dtypes(sr, expected_psr): actual_psr = sr.to_pandas(nullable=True) assert_eq(actual_psr, expected_psr) def test_series_pipe(): psr = pd.Series([10, 20, 30, 40]) gsr = cudf.Series([10, 20, 30, 40]) def custom_add_func(sr, val): new_sr = sr + val return new_sr def custom_to_str_func(sr, val): new_sr = sr.astype("str") + val return new_sr expected = ( psr.pipe(custom_add_func, 11) .pipe(custom_add_func, val=12) .pipe(custom_to_str_func, "rapids") ) actual = ( gsr.pipe(custom_add_func, 11) .pipe(custom_add_func, val=12) .pipe(custom_to_str_func, "rapids") ) assert_eq(expected, actual) expected = ( psr.pipe((custom_add_func, "sr"), val=11) .pipe(custom_add_func, val=1) .pipe(custom_to_str_func, "rapids-ai") ) actual = ( gsr.pipe((custom_add_func, "sr"), val=11) .pipe(custom_add_func, val=1) .pipe(custom_to_str_func, "rapids-ai") ) assert_eq(expected, actual) def test_series_pipe_error(): psr = pd.Series([10, 20, 30, 40]) gsr = cudf.Series([10, 20, 30, 40]) def custom_add_func(sr, val): new_sr = sr + val return new_sr assert_exceptions_equal( lfunc=psr.pipe, rfunc=gsr.pipe, lfunc_args_and_kwargs=([(custom_add_func, "val")], {"val": 11}), rfunc_args_and_kwargs=([(custom_add_func, "val")], {"val": 11}), ) @pytest.mark.parametrize( "data", [cudf.Series([1, 2, 3]), cudf.Series([10, 11, 12], index=[1, 2, 3])], ) @pytest.mark.parametrize( "other", [ cudf.Series([4, 5, 6]), cudf.Series([4, 5, 6, 7, 8]), cudf.Series([4, np.nan, 6], nan_as_null=False), [4, np.nan, 6], {1: 9}, ], ) def test_series_update(data, other): gs = data.copy(deep=True) if isinstance(other, cudf.Series): g_other = other.copy(deep=True) p_other = g_other.to_pandas() else: g_other = other p_other = other ps = gs.to_pandas() ps.update(p_other) with expect_warning_if( 
isinstance(other, cudf.Series) and other.isna().any(), UserWarning ): gs.update(g_other) assert_eq(gs, ps) @pytest.mark.parametrize( "data", [ [1, None, 11, 2.0, np.nan], [np.nan], [None, None, None], [np.nan, 1, 10, 393.32, np.nan], ], ) @pytest.mark.parametrize("nan_as_null", [True, False]) @pytest.mark.parametrize("fill_value", [1.2, 332, np.nan]) def test_fillna_with_nan(data, nan_as_null, fill_value): gs = cudf.Series(data, dtype="float64", nan_as_null=nan_as_null) ps = gs.to_pandas() expected = ps.fillna(fill_value) actual = gs.fillna(fill_value) assert_eq(expected, actual) def test_series_mask_mixed_dtypes_error(): s = cudf.Series(["a", "b", "c"]) with pytest.raises( TypeError, match=re.escape( "cudf does not support mixed types, please type-cast " "the column of dataframe/series and other " "to same dtypes." ), ): s.where([True, False, True], [1, 2, 3]) @pytest.mark.parametrize( "ps", [ pd.Series(["a"] * 20, index=range(0, 20)), pd.Series(["b", None] * 10, index=range(0, 20), name="ASeries"), pd.Series( ["b", None] * 5, index=pd.Index(list(range(10)), dtype="uint64"), name="BSeries", ), ], ) @pytest.mark.parametrize( "labels", [ [1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), pd.Index([0, 1, 2, 3, 4], dtype="float32"), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_series_drop_labels(ps, labels, inplace): ps = ps.copy() gs = cudf.from_pandas(ps) expected = ps.drop(labels=labels, axis=0, inplace=inplace) actual = gs.drop(labels=labels, axis=0, inplace=inplace) if inplace: expected = ps actual = gs assert_eq(expected, actual) @pytest.mark.parametrize( "ps", [ pd.Series(["a"] * 20, index=range(0, 20)), pd.Series(["b", None] * 10, index=range(0, 20), name="ASeries"), ], ) @pytest.mark.parametrize( "index", [[1], [0], 1, 5, [5, 9], pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])], ) @pytest.mark.parametrize("inplace", [True, False]) def test_series_drop_index(ps, index, inplace): ps = ps.copy() gs = cudf.from_pandas(ps) expected = ps.drop(index=index, inplace=inplace) actual = gs.drop(index=index, inplace=inplace) if inplace: expected = ps actual = gs assert_eq(expected, actual) @pytest.mark.parametrize( "ps", [ pd.Series( ["a" if i % 2 == 0 else "b" for i in range(0, 10)], index=pd.MultiIndex( levels=[ ["lama", "cow", "falcon"], ["speed", "weight", "length"], ], codes=[ [0, 0, 0, 1, 1, 1, 2, 2, 2, 1], [0, 1, 2, 0, 1, 2, 0, 1, 2, 1], ], ), name="abc", ) ], ) @pytest.mark.parametrize( "index,level", [ ("cow", 0), ("lama", 0), ("falcon", 0), ("speed", 1), ("weight", 1), ("length", 1), ( "cow", None, ), ( "lama", None, ), ( "falcon", None, ), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_series_drop_multiindex(ps, index, level, inplace): ps = ps.copy() gs = cudf.from_pandas(ps) expected = ps.drop(index=index, inplace=inplace, level=level) actual = gs.drop(index=index, inplace=inplace, level=level) if inplace: expected = ps actual = gs assert_eq(expected, actual) def test_series_drop_edge_inputs(): gs = cudf.Series([42], name="a") ps = gs.to_pandas() assert_eq(ps.drop(columns=["b"]), gs.drop(columns=["b"])) assert_eq(ps.drop(columns="b"), gs.drop(columns="b")) assert_exceptions_equal( lfunc=ps.drop, rfunc=gs.drop, lfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}), rfunc_args_and_kwargs=(["a"], {"columns": "a", "axis": 1}), ) assert_exceptions_equal( lfunc=ps.drop, rfunc=gs.drop, lfunc_args_and_kwargs=([], {}), rfunc_args_and_kwargs=([], {}), ) assert_exceptions_equal( lfunc=ps.drop, rfunc=gs.drop, lfunc_args_and_kwargs=(["b"], 
{"axis": 1}), rfunc_args_and_kwargs=(["b"], {"axis": 1}), ) def test_series_drop_raises(): gs = cudf.Series([10, 20, 30], index=["x", "y", "z"], name="c") ps = gs.to_pandas() assert_exceptions_equal( lfunc=ps.drop, rfunc=gs.drop, lfunc_args_and_kwargs=(["p"],), rfunc_args_and_kwargs=(["p"],), ) # dtype specified mismatch assert_exceptions_equal( lfunc=ps.drop, rfunc=gs.drop, lfunc_args_and_kwargs=([3],), rfunc_args_and_kwargs=([3],), ) expect = ps.drop("p", errors="ignore") actual = gs.drop("p", errors="ignore") assert_eq(actual, expect) @pytest.mark.parametrize( "data", [[[1, 2, 3], None, [4], [], [5, 6]], [1, 2, 3, 4, 5]], ) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize( "p_index", [ None, ["ia", "ib", "ic", "id", "ie"], pd.MultiIndex.from_tuples( [(0, "a"), (0, "b"), (0, "c"), (1, "a"), (1, "b")] ), ], ) def test_explode(data, ignore_index, p_index): pdf = pd.Series(data, index=p_index, name="someseries") gdf = cudf.from_pandas(pdf) expect = pdf.explode(ignore_index) got = gdf.explode(ignore_index) assert_eq(expect, got, check_dtype=False) @pytest.mark.parametrize( "data, expected", [ ( [cudf.Series([1, 2, 3]), cudf.Series([10, 20])], cudf.Series([[1, 2, 3], [10, 20]]), ), ( [cudf.Series([1, 2, 3]), None, cudf.Series([10, 20, np.nan])], cudf.Series([[1, 2, 3], None, [10, 20, np.nan]]), ), ( [cp.array([5, 6]), cudf.NA, cp.array([1])], cudf.Series([[5, 6], None, [1]]), ), ( [None, None, None, None, None, cudf.Series([10, 20])], cudf.Series([None, None, None, None, None, [10, 20]]), ), ], ) def test_nested_series_from_sequence_data(data, expected): actual = cudf.Series(data) assert_eq(actual, expected) @pytest.mark.parametrize( "data", [ cp.ones(5, dtype=cp.float16), np.ones(5, dtype="float16"), pd.Series([0.1, 1.2, 3.3], dtype="float16"), pytest.param( pa.array(np.ones(5, dtype="float16")), marks=pytest.mark.xfail( reason="https://issues.apache.org/jira/browse/ARROW-13762" ), ), ], ) def test_series_upcast_float16(data): actual_series = cudf.Series(data) expected_series = cudf.Series(data, dtype="float32") assert_eq(actual_series, expected_series) @pytest.mark.parametrize( "index", [ pd.RangeIndex(0, 3, 1), [3.0, 1.0, np.nan], ["a", "z", None], pytest.param( pd.RangeIndex(4, -1, -2), marks=[ pytest.mark.xfail( condition=PANDAS_LT_140, reason="https://github.com/pandas-dev/pandas/issues/43591", ) ], ), ], ) @pytest.mark.parametrize("axis", [0, "index"]) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("ignore_index", [True, False]) @pytest.mark.parametrize("inplace", [True, False]) @pytest.mark.parametrize("na_position", ["first", "last"]) def test_series_sort_index( index, axis, ascending, inplace, ignore_index, na_position ): ps = pd.Series([10, 3, 12], index=index) gs = cudf.from_pandas(ps) expected = ps.sort_index( axis=axis, ascending=ascending, ignore_index=ignore_index, inplace=inplace, na_position=na_position, ) got = gs.sort_index( axis=axis, ascending=ascending, ignore_index=ignore_index, inplace=inplace, na_position=na_position, ) if inplace is True: assert_eq(ps, gs, check_index_type=True) else: assert_eq(expected, got, check_index_type=True) @pytest.mark.parametrize("method", ["md5"]) def test_series_hash_values(method): inputs = cudf.Series( [ "", "0", "A 56 character string to test message padding algorithm.", "A 63 character string to test message padding algorithm, again.", "A 64 character string to test message padding algorithm, again!!", ( "A very long (greater than 128 bytes/char string) to execute " "a 
multi hash-step data point in the hash function being " "tested. This string needed to be longer." ), "All work and no play makes Jack a dull boy", "!\"#$%&'()*+,-./0123456789:;<=>?@[\\]^_`{|}~", "\x00\x00\x00\x10\x00\x00\x00\x00", "\x00\x00\x00\x00", ] ) def hashlib_compute_digest(data): hasher = getattr(hashlib, method)() hasher.update(data.encode("utf-8")) return hasher.hexdigest() hashlib_validation = inputs.to_pandas().apply(hashlib_compute_digest) validation_results = cudf.Series(hashlib_validation) hash_values = inputs.hash_values(method=method) assert_eq(hash_values, validation_results) def test_series_hash_values_invalid_method(): inputs = cudf.Series(["", "0"]) with pytest.raises(ValueError): inputs.hash_values(method="invalid_method") def test_set_index_unequal_length(): s = cudf.Series(dtype="float64") with pytest.raises(ValueError): s.index = [1, 2, 3] @pytest.mark.parametrize( "lhs, rhs", [("a", "a"), ("a", "b"), (1, 1.0), (None, None), (None, "a")] ) def test_equals_names(lhs, rhs): lhs = cudf.Series([1, 2], name=lhs) rhs = cudf.Series([1, 2], name=rhs) got = lhs.equals(rhs) expect = lhs.to_pandas().equals(rhs.to_pandas()) assert_eq(expect, got) @pytest.mark.parametrize( "data", [[True, False, None, True, False], [None, None], []] ) @pytest.mark.parametrize("bool_dtype", ["bool", "boolean", pd.BooleanDtype()]) def test_nullable_bool_dtype_series(data, bool_dtype): psr = pd.Series(data, dtype=pd.BooleanDtype()) gsr = cudf.Series(data, dtype=bool_dtype) assert_eq(psr, gsr.to_pandas(nullable=True)) @pytest.mark.parametrize("level", [None, 0, "l0", 1, ["l0", 1]]) @pytest.mark.parametrize("drop", [True, False]) @pytest.mark.parametrize("original_name", [None, "original_ser"]) @pytest.mark.parametrize("name", [None, "ser"]) @pytest.mark.parametrize("inplace", [True, False]) def test_reset_index(level, drop, inplace, original_name, name): midx = pd.MultiIndex.from_tuples( [("a", 1), ("a", 2), ("b", 1), ("b", 2)], names=["l0", None] ) ps = pd.Series(range(4), index=midx, name=original_name) gs = cudf.from_pandas(ps) if not drop and inplace: pytest.skip( "For exception checks, see " "test_reset_index_dup_level_name_exceptions" ) with expect_warning_if(name is None and not drop): expect = ps.reset_index( level=level, drop=drop, name=name, inplace=inplace ) got = gs.reset_index(level=level, drop=drop, name=name, inplace=inplace) if inplace: expect = ps got = gs assert_eq(expect, got) @pytest.mark.parametrize("level", [None, 0, 1, [None]]) @pytest.mark.parametrize("drop", [False, True]) @pytest.mark.parametrize("inplace", [False, True]) @pytest.mark.parametrize("original_name", [None, "original_ser"]) @pytest.mark.parametrize("name", [None, "ser"]) def test_reset_index_dup_level_name(level, drop, inplace, original_name, name): # midx levels are named [None, None] midx = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]) ps = pd.Series(range(4), index=midx, name=original_name) gs = cudf.from_pandas(ps) if level == [None] or not drop and inplace: pytest.skip( "For exception checks, see " "test_reset_index_dup_level_name_exceptions" ) with expect_warning_if(name is None and not drop): expect = ps.reset_index( level=level, drop=drop, inplace=inplace, name=name ) got = gs.reset_index(level=level, drop=drop, inplace=inplace, name=name) if inplace: expect = ps got = gs assert_eq(expect, got) @pytest.mark.parametrize("drop", [True, False]) @pytest.mark.parametrize("inplace", [True, False]) @pytest.mark.parametrize("original_name", [None, "original_ser"]) 
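# Resetting a named index should surface it as a column unless drop=True;
# the tests expect a FutureWarning (from pandas, mirrored by cudf) only
# when name is None and the index is not dropped, via expect_warning_if.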
@pytest.mark.parametrize("name", [None, "ser"]) def test_reset_index_named(drop, inplace, original_name, name): ps = pd.Series(range(4), index=["x", "y", "z", "w"], name=original_name) gs = cudf.from_pandas(ps) ps.index.name = "cudf" gs.index.name = "cudf" if not drop and inplace: pytest.skip( "For exception checks, see " "test_reset_index_dup_level_name_exceptions" ) with expect_warning_if(name is None and not drop): expect = ps.reset_index(drop=drop, inplace=inplace, name=name) got = gs.reset_index(drop=drop, inplace=inplace, name=name) if inplace: expect = ps got = gs assert_eq(expect, got) def test_reset_index_dup_level_name_exceptions(): midx = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)]) ps = pd.Series(range(4), index=midx) gs = cudf.from_pandas(ps) # Should specify duplicate level names with level number. assert_exceptions_equal( lfunc=ps.reset_index, rfunc=gs.reset_index, lfunc_args_and_kwargs=( [], {"level": [None]}, ), rfunc_args_and_kwargs=( [], {"level": [None]}, ), ) # Cannot use drop=False and inplace=True to turn a series into dataframe. assert_exceptions_equal( lfunc=ps.reset_index, rfunc=gs.reset_index, lfunc_args_and_kwargs=( [], {"drop": False, "inplace": True}, ), rfunc_args_and_kwargs=( [], {"drop": False, "inplace": True}, ), ) # Pandas raises the above exception should these two inputs crosses. assert_exceptions_equal( lfunc=ps.reset_index, rfunc=gs.reset_index, lfunc_args_and_kwargs=( [], {"level": [None], "drop": False, "inplace": True}, ), rfunc_args_and_kwargs=( [], {"level": [None], "drop": False, "inplace": True}, ), ) def test_series_add_prefix(): cd_s = cudf.Series([1, 2, 3, 4]) pd_s = cd_s.to_pandas() got = cd_s.add_prefix("item_") expected = pd_s.add_prefix("item_") assert_eq(got, expected) def test_series_add_suffix(): cd_s = cudf.Series([1, 2, 3, 4]) pd_s = cd_s.to_pandas() got = cd_s.add_suffix("_item") expected = pd_s.add_suffix("_item") assert_eq(got, expected) @pytest.mark.parametrize( "cudf_series", [ cudf.Series([0.25, 0.5, 0.2, -0.05]), cudf.Series([0, 1, 2, np.nan, 4, cudf.NA, 6]), ], ) @pytest.mark.parametrize("lag", [1, 2, 3, 4]) def test_autocorr(cudf_series, lag): psr = cudf_series.to_pandas() cudf_corr = cudf_series.autocorr(lag=lag) # autocorrelation is undefined (nan) for less than two entries, but pandas # short-circuits when there are 0 entries and bypasses the numpy function # call that generates an error. 
num_both_valid = (psr.notna() & psr.shift(lag).notna()).sum() with expect_warning_if(num_both_valid == 1, RuntimeWarning): pd_corr = psr.autocorr(lag=lag) assert_eq(pd_corr, cudf_corr) @pytest.mark.parametrize( "data", [ [0, 1, 2, 3], ["abc", "a", None, "hello world", "foo buzz", "", None, "rapids ai"], ], ) def test_series_transpose(data): psr = pd.Series(data=data) csr = cudf.Series(data=data) cudf_transposed = csr.transpose() pd_transposed = psr.transpose() cudf_property = csr.T pd_property = psr.T assert_eq(pd_transposed, cudf_transposed) assert_eq(pd_property, cudf_property) assert_eq(cudf_transposed, csr) @pytest.mark.parametrize( "data", [1, 3, 5, 7, 7], ) def test_series_nunique(data): cd_s = cudf.Series(data) pd_s = cd_s.to_pandas() actual = cd_s.nunique() expected = pd_s.nunique() assert_eq(expected, actual) @pytest.mark.parametrize( "data", [1, 3, 5, 7, 7], ) def test_series_nunique_index(data): cd_s = cudf.Series(data) pd_s = cd_s.to_pandas() actual = cd_s.index.nunique() expected = pd_s.index.nunique() assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [], [1, 2, 3, 4], ["a", "b", "c"], [1.2, 2.2, 4.5], [np.nan, np.nan], [None, None, None], ], ) def test_axes(data): csr = _create_cudf_series_float64_default(data) psr = csr.to_pandas() expected = psr.axes actual = csr.axes for e, a in zip(expected, actual): assert_eq(e, a) def test_series_truncate(): csr = cudf.Series([1, 2, 3, 4]) psr = csr.to_pandas() assert_eq(csr.truncate(), psr.truncate()) assert_eq(csr.truncate(1, 2), psr.truncate(1, 2)) assert_eq(csr.truncate(before=1, after=2), psr.truncate(before=1, after=2)) def test_series_truncate_errors(): csr = cudf.Series([1, 2, 3, 4]) with pytest.raises(ValueError): csr.truncate(axis=1) with pytest.raises(ValueError): csr.truncate(copy=False) csr.index = [3, 2, 1, 6] psr = csr.to_pandas() assert_exceptions_equal( lfunc=csr.truncate, rfunc=psr.truncate, ) def test_series_truncate_datetimeindex(): dates = cudf.date_range( "2021-01-01 23:45:00", "2021-01-02 23:46:00", freq="s" ) csr = cudf.Series(range(len(dates)), index=dates) psr = csr.to_pandas() assert_eq( csr.truncate( before="2021-01-01 23:45:18", after="2021-01-01 23:45:27" ), psr.truncate( before="2021-01-01 23:45:18", after="2021-01-01 23:45:27" ), ) @pytest.mark.parametrize( "data", [ [], [0, 12, 14], [0, 14, 12, 12, 3, 10, 12, 14], np.random.randint(-100, 100, 200), pd.Series([0.0, 1.0, None, 10.0]), [None, None, None, None], [np.nan, None, -1, 2, 3], ], ) @pytest.mark.parametrize( "values", [ np.random.randint(-100, 100, 10), [], [np.nan, None, -1, 2, 3], [1.0, 12.0, None, None, 120], [0, 14, 12, 12, 3, 10, 12, 14, None], [None, None, None], ["0", "12", "14"], ["0", "12", "14", "a"], ], ) def test_isin_numeric(data, values): index = np.random.randint(0, 100, len(data)) psr = _create_pandas_series_float64_default(data, index=index) gsr = cudf.Series.from_pandas(psr, nan_as_null=False) expected = psr.isin(values) got = gsr.isin(values) assert_eq(got, expected) @pytest.mark.xfail(raises=TypeError) def test_fill_new_category(): gs = cudf.Series(pd.Categorical(["a", "b", "c"])) gs[0:1] = "d" @pytest.mark.parametrize( "data", [ [], pd.Series( ["2018-01-01", "2019-04-03", None, "2019-12-30"], dtype="datetime64[ns]", ), pd.Series( [ "2018-01-01", "2019-04-03", None, "2019-12-30", "2018-01-01", "2018-01-01", ], dtype="datetime64[ns]", ), ], ) @pytest.mark.parametrize( "values", [ [], [1514764800000000000, 1577664000000000000], [ 1514764800000000000, 1577664000000000000, 1577664000000000000, 1577664000000000000, 
1514764800000000000, ], ["2019-04-03", "2019-12-30", "2012-01-01"], [ "2012-01-01", "2012-01-01", "2012-01-01", "2019-04-03", "2019-12-30", "2012-01-01", ], ], ) def test_isin_datetime(data, values): psr = _create_pandas_series_float64_default(data) gsr = cudf.Series.from_pandas(psr) got = gsr.isin(values) expected = psr.isin(values) assert_eq(got, expected) @pytest.mark.parametrize( "data", [ [], pd.Series(["this", "is", None, "a", "test"]), pd.Series(["test", "this", "test", "is", None, "test", "a", "test"]), pd.Series(["0", "12", "14"]), ], ) @pytest.mark.parametrize( "values", [ [], ["this", "is"], [None, None, None], ["12", "14", "19"], [12, 14, 19], ["is", "this", "is", "this", "is"], ], ) def test_isin_string(data, values): psr = _create_pandas_series_float64_default(data) gsr = cudf.Series.from_pandas(psr) got = gsr.isin(values) expected = psr.isin(values) assert_eq(got, expected) @pytest.mark.parametrize( "data", [ [], pd.Series(["a", "b", "c", "c", "c", "d", "e"], dtype="category"), pd.Series(["a", "b", None, "c", "d", "e"], dtype="category"), pd.Series([0, 3, 10, 12], dtype="category"), pd.Series([0, 3, 10, 12, 0, 10, 3, 0, 0, 3, 3], dtype="category"), ], ) @pytest.mark.parametrize( "values", [ [], ["a", "b", None, "f", "words"], ["0", "12", None, "14"], [0, 10, 12, None, 39, 40, 1000], [0, 0, 0, 0, 3, 3, 3, None, 1, 2, 3], ], ) def test_isin_categorical(data, values): psr = _create_pandas_series_float64_default(data) gsr = cudf.Series.from_pandas(psr) got = gsr.isin(values) expected = psr.isin(values) assert_eq(got, expected) @pytest.mark.parametrize("dtype", NUMERIC_TYPES) @pytest.mark.parametrize("period", [-1, -5, -10, -20, 0, 1, 5, 10, 20]) @pytest.mark.parametrize("data_empty", [False, True]) def test_diff(dtype, period, data_empty): if data_empty: data = None else: if dtype == np.int8: # to keep data in range data = gen_rand(dtype, 100000, low=-2, high=2) else: data = gen_rand(dtype, 100000) gs = cudf.Series(data, dtype=dtype) ps = pd.Series(data, dtype=dtype) expected_outcome = ps.diff(period) diffed_outcome = gs.diff(period).astype(expected_outcome.dtype) if data_empty: assert_eq(diffed_outcome, expected_outcome, check_index_type=False) else: assert_eq(diffed_outcome, expected_outcome) @pytest.mark.parametrize( "data", [ ["a", "b", "c", "d", "e"], ], ) def test_diff_unsupported_dtypes(data): gs = cudf.Series(data) with pytest.raises( TypeError, match=r"unsupported operand type\(s\)", ): gs.diff() @pytest.mark.parametrize( "data", [ pd.date_range("2020-01-01", "2020-01-06", freq="D"), [True, True, True, False, True, True], [1.0, 2.0, 3.5, 4.0, 5.0, -1.7], [1, 2, 3, 3, 4, 5], [np.nan, None, None, np.nan, np.nan, None], ], ) def test_diff_many_dtypes(data): ps = pd.Series(data) gs = cudf.from_pandas(ps) assert_eq(ps.diff(), gs.diff()) assert_eq(ps.diff(periods=2), gs.diff(periods=2)) @pytest.mark.parametrize("num_rows", [1, 100]) @pytest.mark.parametrize("num_bins", [1, 10]) @pytest.mark.parametrize("right", [True, False]) @pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["bool"]) @pytest.mark.parametrize("series_bins", [True, False]) def test_series_digitize(num_rows, num_bins, right, dtype, series_bins): data = np.random.randint(0, 100, num_rows).astype(dtype) bins = np.unique(np.sort(np.random.randint(2, 95, num_bins).astype(dtype))) s = cudf.Series(data) if series_bins: s_bins = cudf.Series(bins) indices = s.digitize(s_bins, right) else: indices = s.digitize(bins, right) np.testing.assert_array_equal( np.digitize(data, bins, right), indices.to_numpy() ) def 
test_series_digitize_invalid_bins(): s = cudf.Series(np.random.randint(0, 30, 80), dtype="int32") bins = cudf.Series([2, None, None, 50, 90], dtype="int32") with pytest.raises( ValueError, match="`bins` cannot contain null entries." ): _ = s.digitize(bins) @pytest.mark.parametrize( "data,left,right", [ ([0, 1, 2, 3, 4, 5, 10], 0, 5), ([0, 1, 2, 3, 4, 5, 10], 10, 1), ([0, 1, 2, 3, 4, 5], [0, 10, 11] * 2, [1, 2, 5] * 2), (["a", "few", "set", "of", "strings", "xyz", "abc"], "banana", "few"), (["a", "few", "set", "of", "strings", "xyz", "abc"], "phone", "hello"), ( ["a", "few", "set", "of", "strings", "xyz", "abc"], ["a", "hello", "rapids", "ai", "world", "chars", "strs"], ["yes", "no", "hi", "bye", "test", "pass", "fail"], ), ([0, 1, 2, np.nan, 4, np.nan, 10], 10, 1), ], ) @pytest.mark.parametrize("inclusive", ["both", "neither", "left", "right"]) def test_series_between(data, left, right, inclusive): ps = pd.Series(data) gs = cudf.from_pandas(ps, nan_as_null=False) expected = ps.between(left, right, inclusive=inclusive) actual = gs.between(left, right, inclusive=inclusive) assert_eq(expected, actual) @pytest.mark.parametrize( "data,left,right", [ ([0, 1, 2, None, 4, 5, 10], 0, 5), ([0, 1, 2, 3, None, 5, 10], 10, 1), ([None, 1, 2, 3, 4, None], [0, 10, 11] * 2, [1, 2, 5] * 2), ( ["a", "few", "set", None, "strings", "xyz", "abc"], ["a", "hello", "rapids", "ai", "world", "chars", "strs"], ["yes", "no", "hi", "bye", "test", "pass", "fail"], ), ], ) @pytest.mark.parametrize("inclusive", ["both", "neither", "left", "right"]) def test_series_between_with_null(data, left, right, inclusive): gs = cudf.Series(data) ps = gs.to_pandas(nullable=True) expected = ps.between(left, right, inclusive=inclusive) actual = gs.between(left, right, inclusive=inclusive) assert_eq(expected, actual.to_pandas(nullable=True)) def test_default_construction(): s = cudf.Series([np.int8(8), np.int16(128)]) assert s.dtype == np.dtype("i2") @pytest.mark.parametrize( "data", [[0, 1, 2, 3, 4], range(5), [np.int8(8), np.int16(128)]] ) def test_default_integer_bitwidth_construction(default_integer_bitwidth, data): s = cudf.Series(data) assert s.dtype == np.dtype(f"i{default_integer_bitwidth//8}") @pytest.mark.parametrize("data", [[1.5, 2.5, 4.5], [1000, 2000, 4000, 3.14]]) def test_default_float_bitwidth_construction(default_float_bitwidth, data): s = cudf.Series(data) assert s.dtype == np.dtype(f"f{default_float_bitwidth//8}") def test_series_ordered_dedup(): # part of https://github.com/rapidsai/cudf/issues/11486 sr = cudf.Series(np.random.randint(0, 100, 1000)) # pandas unique() preserves order expect = pd.Series(sr.to_pandas().unique()) got = cudf.Series(sr._column.unique()) assert_eq(expect.values, got.values) @pytest.mark.parametrize("dtype", ["int64", "float64"]) @pytest.mark.parametrize("bool_scalar", [True, False]) def test_set_bool_error(dtype, bool_scalar): sr = cudf.Series([1, 2, 3], dtype=dtype) psr = sr.to_pandas(nullable=True) assert_exceptions_equal( lfunc=sr.__setitem__, rfunc=psr.__setitem__, lfunc_args_and_kwargs=([bool_scalar],), rfunc_args_and_kwargs=([bool_scalar],), ) def test_int64_equality(): s = cudf.Series(np.asarray([2**63 - 10, 2**63 - 100], dtype=np.int64)) assert (s != np.int64(2**63 - 1)).all() assert (s != cudf.Scalar(2**63 - 1, dtype=np.int64)).all() @pytest.mark.parametrize("into", [dict, OrderedDict, defaultdict(list)]) def test_series_to_dict(into): gs = cudf.Series(["ab", "de", "zx"], index=[10, 20, 100]) ps = gs.to_pandas() actual = gs.to_dict(into=into) expected = ps.to_dict(into=into) 
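    # Both libraries should honor the requested container type (dict,
    # OrderedDict, or a defaultdict instance).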
assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1, 2, 3], pytest.param( [np.nan, 10, 15, 16], marks=pytest.mark.xfail( reason="https://github.com/pandas-dev/pandas/issues/49818" ), ), [np.nan, None, 10, 20], ["ab", "zx", "pq"], ["ab", "zx", None, "pq"], [], ], ) def test_series_hasnans(data): gs = _create_cudf_series_float64_default(data, nan_as_null=False) ps = gs.to_pandas(nullable=True) # Check type to avoid mixing Python bool and NumPy bool assert isinstance(gs.hasnans, bool) assert gs.hasnans == ps.hasnans @pytest.mark.parametrize( "data,index", [ ([1, 2, 3], [10, 11, 12]), ([1, 2, 3, 1, 1, 2, 3, 2], [10, 20, 23, 24, 25, 26, 27, 28]), ([1, None, 2, None, 3, None, 3, 1], [5, 6, 7, 8, 9, 10, 11, 12]), ([np.nan, 1.0, np.nan, 5.4, 5.4, 1.0], ["a", "b", "c", "d", "e", "f"]), ( ["lama", "cow", "lama", None, "beetle", "lama", None, None], [1, 4, 10, 11, 2, 100, 200, 400], ), ], ) @pytest.mark.parametrize("keep", ["first", "last", False]) def test_series_duplicated(data, index, keep): gs = cudf.Series(data, index=index) ps = gs.to_pandas() assert_eq(gs.duplicated(keep=keep), ps.duplicated(keep=keep)) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4], [10, 20, None, None], ], ) @pytest.mark.parametrize("copy", [True, False]) def test_series_copy(data, copy): psr = pd.Series(data) gsr = cudf.from_pandas(psr) new_psr = pd.Series(psr, copy=copy) new_gsr = cudf.Series(gsr, copy=copy) new_psr.iloc[0] = 999 new_gsr.iloc[0] = 999 assert_eq(psr, gsr) assert_eq(new_psr, new_gsr) @pytest.mark.parametrize( "data", [ {"a": 1, "b": 2, "c": 24, "d": 1010}, {"a": 1}, ], ) @pytest.mark.parametrize( "index", [None, ["b", "c"], ["d", "a", "c", "b"], ["a"]] ) def test_series_init_dict_with_index(data, index): pandas_series = pd.Series(data, index=index) cudf_series = cudf.Series(data, index=index) assert_eq(pandas_series, cudf_series) @pytest.mark.parametrize("data", ["abc", None, 1, 3.7]) @pytest.mark.parametrize( "index", [None, ["b", "c"], ["d", "a", "c", "b"], ["a"]] ) def test_series_init_scalar_with_index(data, index): pandas_series = _create_pandas_series_float64_default(data, index=index) cudf_series = _create_cudf_series_float64_default(data, index=index) assert_eq( pandas_series, cudf_series, check_index_type=False if data is None and index is None else True, ) def test_series_init_error(): assert_exceptions_equal( lfunc=pd.Series, rfunc=cudf.Series, lfunc_args_and_kwargs=([], {"data": [11], "index": [10, 11]}), rfunc_args_and_kwargs=([], {"data": [11], "index": [10, 11]}), ) def test_series_init_from_series_and_index(): ser = cudf.Series([4, 7, -5, 3], index=["d", "b", "a", "c"]) result = cudf.Series(ser, index=list("abcd")) expected = cudf.Series([-5, 7, 3, 4], index=list("abcd")) assert_eq(result, expected) @pytest.mark.parametrize( "dtype", ["datetime64[ns]", "timedelta64[ns]", "object", "str"] ) def test_series_mixed_dtype_error(dtype): ps = pd.concat([pd.Series([1, 2, 3], dtype=dtype), pd.Series([10, 11])]) with pytest.raises(TypeError): cudf.Series(ps) with pytest.raises(TypeError): cudf.Series(ps.array) @pytest.mark.parametrize("data", [[True, False, None], [10, 200, 300]]) @pytest.mark.parametrize("index", [None, [10, 20, 30]]) def test_series_contains(data, index): ps = pd.Series(data, index=index) gs = cudf.Series(data, index=index) assert_eq(1 in ps, 1 in gs) assert_eq(10 in ps, 10 in gs) assert_eq(True in ps, True in gs) assert_eq(False in ps, False in gs) def test_series_from_pandas_sparse(): pser = pd.Series(range(2), dtype=pd.SparseDtype(np.int64, 0)) with 
pytest.raises(NotImplementedError): cudf.Series(pser) def test_series_constructor_unbounded_sequence(): class A: def __getitem__(self, key): return 1 with pytest.raises(TypeError): cudf.Series(A()) def test_series_constructor_error_mixed_type(): with pytest.raises(pa.ArrowTypeError): cudf.Series(["abc", np.nan, "123"], nan_as_null=False) def test_series_typecast_to_object_error(): actual = cudf.Series([1, 2, 3], dtype="datetime64[ns]") with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(ValueError): actual.astype(object) with pytest.raises(ValueError): actual.astype(np.dtype("object")) new_series = actual.astype("str") assert new_series[0] == "1970-01-01 00:00:00.000000001" def test_series_typecast_to_object(): actual = cudf.Series([1, 2, 3], dtype="datetime64[ns]") with cudf.option_context("mode.pandas_compatible", False): new_series = actual.astype(object) assert new_series[0] == "1970-01-01 00:00:00.000000001" new_series = actual.astype(np.dtype("object")) assert new_series[0] == "1970-01-01 00:00:00.000000001" @pytest.mark.parametrize("attr", ["nlargest", "nsmallest"]) def test_series_nlargest_nsmallest_str_error(attr): gs = cudf.Series(["a", "b", "c", "d", "e"]) ps = gs.to_pandas() assert_exceptions_equal( getattr(gs, attr), getattr(ps, attr), ([], {"n": 1}), ([], {"n": 1}) ) def test_series_unique_pandas_compatibility(): gs = cudf.Series([10, 11, 12, 11, 10]) ps = gs.to_pandas() with cudf.option_context("mode.pandas_compatible", True): actual = gs.unique() expected = ps.unique() assert_eq(actual, expected) @pytest.mark.parametrize("initial_name", SERIES_OR_INDEX_NAMES) @pytest.mark.parametrize("name", SERIES_OR_INDEX_NAMES) def test_series_rename(initial_name, name): gsr = cudf.Series([1, 2, 3], name=initial_name) psr = pd.Series([1, 2, 3], name=initial_name) assert_eq(gsr, psr) actual = gsr.rename(name) expected = psr.rename(name) assert_eq(actual, expected) @pytest.mark.parametrize( "data", [ [1.2234242333234, 323432.3243423, np.nan], pd.Series([34224, 324324, 324342], dtype="datetime64[ns]"), pd.Series([224.242, None, 2424.234324], dtype="category"), [ decimal.Decimal("342.3243234234242"), decimal.Decimal("89.32432497687622"), None, ], ], ) @pytest.mark.parametrize("digits", [0, 1, 3, 4, 10]) def test_series_round_builtin(data, digits): ps = pd.Series(data) gs = cudf.from_pandas(ps, nan_as_null=False) # TODO: Remove `to_frame` workaround # after following issue is fixed: # https://github.com/pandas-dev/pandas/issues/55114 expected = round(ps.to_frame(), digits)[0] expected.name = None actual = round(gs, digits) assert_eq(expected, actual) def test_series_empty_warning(): with pytest.warns(FutureWarning): expected = pd.Series([]) with pytest.warns(FutureWarning): actual = cudf.Series([]) assert_eq(expected, actual) @pytest.mark.filterwarnings("ignore::FutureWarning") # tested above @pytest.mark.parametrize("data", [None, {}, []]) def test_series_empty_index_rangeindex(data): expected = cudf.RangeIndex(0) result = cudf.Series(data).index assert_eq(result, expected) def test_series_count_invalid_param(): s = cudf.Series([], dtype="float64") with pytest.raises(TypeError): s.count(skipna=True) @pytest.mark.parametrize( "data", [[0, 1, 2], ["a", "b", "c"], [0.324, 32.32, 3243.23]] ) def test_series_setitem_nat_with_non_datetimes(data): s = cudf.Series(data) with pytest.raises(TypeError): s[0] = cudf.NaT def test_series_string_setitem(): gs = cudf.Series(["abc", "def", "ghi", "xyz", "pqr"]) ps = gs.to_pandas() gs[0] = "NaT" gs[1] = "NA" gs[2] = "<NA>" gs[3] = 
"NaN" ps[0] = "NaT" ps[1] = "NA" ps[2] = "<NA>" ps[3] = "NaN" assert_eq(gs, ps) def test_multi_dim_series_error(): arr = cp.array([(1, 2), (3, 4)]) with pytest.raises(ValueError): cudf.Series(arr) def test_bool_series_mixed_dtype_error(): ps = pd.Series([True, False, None]) # ps now has `object` dtype, which # isn't supported by `cudf`. with pytest.raises(TypeError): cudf.Series(ps) with pytest.raises(TypeError): cudf.from_pandas(ps) @pytest.mark.parametrize( "pandas_type", [ pd.ArrowDtype(pa.int8()), pd.ArrowDtype(pa.int16()), pd.ArrowDtype(pa.int32()), pd.ArrowDtype(pa.int64()), pd.ArrowDtype(pa.uint8()), pd.ArrowDtype(pa.uint16()), pd.ArrowDtype(pa.uint32()), pd.ArrowDtype(pa.uint64()), pd.ArrowDtype(pa.float32()), pd.ArrowDtype(pa.float64()), pd.Int8Dtype(), pd.Int16Dtype(), pd.Int32Dtype(), pd.Int64Dtype(), pd.UInt8Dtype(), pd.UInt16Dtype(), pd.UInt32Dtype(), pd.UInt64Dtype(), pd.Float32Dtype(), pd.Float64Dtype(), ], ) def test_series_arrow_numeric_types_roundtrip(pandas_type): ps = pd.Series([1, 2, 3], dtype=pandas_type) pi = pd.Index(ps) pdf = ps.to_frame() with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(ps) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pi) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pdf) @pytest.mark.parametrize( "pandas_type", [pd.ArrowDtype(pa.bool_()), pd.BooleanDtype()] ) def test_series_arrow_bool_types_roundtrip(pandas_type): ps = pd.Series([True, False, None], dtype=pandas_type) pi = pd.Index(ps) pdf = ps.to_frame() with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(ps) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pi) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pdf) @pytest.mark.parametrize( "pandas_type", [pd.ArrowDtype(pa.string()), pd.StringDtype()] ) def test_series_arrow_string_types_roundtrip(pandas_type): ps = pd.Series(["abc", None, "xyz"], dtype=pandas_type) pi = pd.Index(ps) pdf = ps.to_frame() with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(ps) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pi) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pdf) def test_series_arrow_category_types_roundtrip(): pa_array = pa.array(pd.Series([1, 2, 3], dtype="category")) ps = pd.Series([1, 2, 3], dtype=pd.ArrowDtype(pa_array.type)) pi = pd.Index(ps) pdf = pi.to_frame() with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(ps) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pi) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pdf) @pytest.mark.parametrize( "pa_type", [pa.decimal128(10, 2), pa.decimal128(5, 2), pa.decimal128(20, 2)], ) def test_series_arrow_decimal_types_roundtrip(pa_type): ps = pd.Series( [ decimal.Decimal("1.2"), decimal.Decimal("20.56"), decimal.Decimal("3"), ], dtype=pd.ArrowDtype(pa_type), ) pdf = ps.to_frame() with 
cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(ps) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pdf) def test_series_arrow_struct_types_roundtrip(): ps = pd.Series( [{"a": 1}, {"b": "abc"}], dtype=pd.ArrowDtype(pa.struct({"a": pa.int64(), "b": pa.string()})), ) pdf = ps.to_frame() with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(ps) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pdf) def test_series_arrow_list_types_roundtrip(): ps = pd.Series([[1], [2], [4]], dtype=pd.ArrowDtype(pa.list_(pa.int64()))) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(ps) pdf = ps.to_frame() with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(ps) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): cudf.from_pandas(pdf) @pytest.mark.parametrize("reso", ["M", "ps"]) @pytest.mark.parametrize("typ", ["M", "m"]) def test_series_invalid_reso_dtype(reso, typ): with pytest.raises(NotImplementedError): cudf.Series([], dtype=f"{typ}8[{reso}]") def test_series_categorical_missing_value_count(): ps = pd.Series(pd.Categorical(list("abcccb"), categories=list("cabd"))) gs = cudf.from_pandas(ps) expected = ps.value_counts() actual = gs.value_counts() assert_eq(expected, actual, check_dtype=False) def test_series_error_nan_mixed_types(): ps = pd.Series([np.nan, "ab", "cd"]) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(pa.ArrowInvalid): cudf.from_pandas(ps) def test_series_error_nan_non_float_dtypes(): s = cudf.Series(["a", "b", "c"]) with pytest.raises(TypeError): s[0] = np.nan s = cudf.Series([1, 2, 3], dtype="datetime64[ns]") with pytest.raises(TypeError): s[0] = np.nan @pytest.mark.parametrize( "dtype", [ pd.ArrowDtype(pa.int8()), pd.ArrowDtype(pa.int16()), pd.ArrowDtype(pa.int32()), pd.ArrowDtype(pa.int64()), pd.ArrowDtype(pa.uint8()), pd.ArrowDtype(pa.uint16()), pd.ArrowDtype(pa.uint32()), pd.ArrowDtype(pa.uint64()), pd.ArrowDtype(pa.float32()), pd.ArrowDtype(pa.float64()), pd.Int8Dtype(), pd.Int16Dtype(), pd.Int32Dtype(), pd.Int64Dtype(), pd.UInt8Dtype(), pd.UInt16Dtype(), pd.UInt32Dtype(), pd.UInt64Dtype(), pd.Float32Dtype(), pd.Float64Dtype(), ], ) @pytest.mark.parametrize("klass", [cudf.Series, cudf.DataFrame, cudf.Index]) @pytest.mark.parametrize("kind", [lambda x: x, str], ids=["obj", "string"]) def test_astype_pandas_nullable_pandas_compat(dtype, klass, kind): ser = klass([1, 2, 3]) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): ser.astype(kind(dtype)) def test_series_where_mixed_bool_dtype(): s = cudf.Series([True, False, True]) with pytest.raises(TypeError): s.where(~s, 10) def test_series_setitem_mixed_bool_dtype(): s = cudf.Series([True, False, True]) with pytest.raises(TypeError): s[0] = 10 def test_series_duplicate_index_reindex(): gs = cudf.Series([0, 1, 2, 3], index=[0, 0, 1, 1]) ps = gs.to_pandas() assert_exceptions_equal( gs.reindex, ps.reindex, lfunc_args_and_kwargs=([10, 11, 12, 13], {}), rfunc_args_and_kwargs=([10, 11, 12, 13], {}), )
0
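# A minimal, self-contained sketch of a pattern the series tests above use
# throughout: cudf.option_context("mode.pandas_compatible", True) opts into
# pandas-matching behavior for a single block. This sketch assumes only the
# public cudf API already exercised by those tests.
import cudf

s = cudf.Series([10, 11, 12, 11, 10])
with cudf.option_context("mode.pandas_compatible", True):
    # Inside the context, unique() follows pandas semantics; the tests
    # above compare the result against pandas.Series.unique via assert_eq.
    uniques = s.unique()
print(uniques)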
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_spilling.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION. import importlib import random import time import warnings import weakref from concurrent.futures import ThreadPoolExecutor from typing import List, Tuple import cupy import numpy as np import pandas import pandas.testing import pytest import rmm import cudf import cudf.core.buffer.spill_manager import cudf.options from cudf.core.abc import Serializable from cudf.core.buffer import ( Buffer, acquire_spill_lock, as_buffer, get_spill_lock, ) from cudf.core.buffer.spill_manager import ( SpillManager, get_global_manager, get_rmm_memory_resource_stack, set_global_manager, ) from cudf.core.buffer.spillable_buffer import ( SpillableBuffer, SpillableBufferSlice, SpillLock, ) from cudf.testing._utils import assert_eq if get_global_manager() is not None: pytest.skip( "cannot test spilling when enabled globally, set `CUDF_SPILL=off`", allow_module_level=True, ) def single_column_df(target="gpu") -> cudf.DataFrame: """Create a standard single column dataframe used for testing Use `single_column_df_data`, `single_column_df_base_data`, `gen_df_data_nbytes` for easy access to the buffer of the column. Notice, this is just for convenience; there is nothing special about this dataframe. Parameters ---------- target : str, optional Set the spill state of the dataframe Returns ------- DataFrame A standard dataframe with a single column """ ret = cudf.DataFrame({"a": [1, 2, 3]}) if target != "gpu": single_column_df_data(ret).spill(target=target) return ret def single_column_df_data(df: cudf.DataFrame) -> SpillableBuffer: """Access `.data` of the column of a standard dataframe""" ret = df._data._data["a"].data assert isinstance(ret, SpillableBuffer) return ret def single_column_df_base_data(df: cudf.DataFrame) -> SpillableBuffer: """Access `.base_data` of the column of a standard dataframe""" ret = df._data._data["a"].base_data assert isinstance(ret, SpillableBuffer) return ret # Get number of bytes of the column of a standard dataframe gen_df_data_nbytes = single_column_df()._data._data["a"].data.nbytes def spilled_and_unspilled(manager: SpillManager) -> Tuple[int, int]: """Get bytes spilled and unspilled known by the manager""" spilled = sum(buf.size for buf in manager.buffers() if buf.is_spilled) unspilled = sum( buf.size for buf in manager.buffers() if not buf.is_spilled ) return spilled, unspilled @pytest.fixture def manager(request): """Fixture to enable and make a spilling manager available""" kwargs = dict(getattr(request, "param", {})) with warnings.catch_warnings(): warnings.simplefilter("error") set_global_manager(manager=SpillManager(**kwargs)) yield get_global_manager() # Retrieving the test result using the `pytest_runtest_makereport` # hook from conftest.py if request.node.report["call"].failed: # Ignore `overwriting non-empty manager` errors when # the test is failing. warnings.simplefilter("ignore") set_global_manager(manager=None) def test_spillable_buffer(manager: SpillManager): buf = as_buffer(data=rmm.DeviceBuffer(size=10), exposed=False) assert isinstance(buf, SpillableBuffer) assert buf.spillable buf.mark_exposed() assert buf.exposed assert not buf.spillable buf = as_buffer(data=rmm.DeviceBuffer(size=10), exposed=False) # Notice, accessing `__cuda_array_interface__` itself doesn't # expose the pointer, only accessing the "data" field exposes # the pointer.
iface = buf.__cuda_array_interface__ assert not buf.exposed assert buf.spillable iface["data"][0] # Expose pointer assert buf.exposed assert not buf.spillable @pytest.mark.parametrize( "attribute", [ "get_ptr", "memoryview", "is_spilled", "exposed", "spillable", "spill_lock", "spill", "memory_info", ], ) def test_spillable_buffer_view_attributes(manager: SpillManager, attribute): base = as_buffer(data=rmm.DeviceBuffer(size=10), exposed=False) view = base[:] attr_base = getattr(base, attribute) attr_view = getattr(view, attribute) if callable(attr_view): pass else: assert attr_base == attr_view @pytest.mark.parametrize("target", ["gpu", "cpu"]) def test_memory_info(manager: SpillManager, target): if target == "gpu": mem = rmm.DeviceBuffer(size=10) ptr = mem.ptr elif target == "cpu": mem = np.empty(10, dtype="u1") ptr = mem.__array_interface__["data"][0] b = as_buffer(data=mem, exposed=False) assert b.memory_info() == (ptr, mem.size, target) assert b[:].memory_info() == (ptr, mem.size, target) assert b[:-1].memory_info() == (ptr, mem.size - 1, target) assert b[1:].memory_info() == (ptr + 1, mem.size - 1, target) assert b[2:4].memory_info() == (ptr + 2, 2, target) def test_from_pandas(manager: SpillManager): pdf1 = pandas.DataFrame({"a": [1, 2, 3]}) df = cudf.from_pandas(pdf1) assert single_column_df_data(df).spillable pdf2 = df.to_pandas() pandas.testing.assert_frame_equal(pdf1, pdf2) def test_creations(manager: SpillManager): df = single_column_df() assert single_column_df_data(df).spillable df = cudf.datasets.timeseries(dtypes={"a": float}) assert single_column_df_data(df).spillable df = cudf.datasets.randomdata(dtypes={"a": float}) assert single_column_df_data(df).spillable def test_spillable_df_groupby(manager: SpillManager): df = cudf.DataFrame({"a": [1, 1, 1]}) gb = df.groupby("a") assert len(single_column_df_base_data(df)._spill_locks) == 0 gb._groupby # `gb._groupby`, which is cached on `gb`, holds a spill lock assert len(single_column_df_base_data(df)._spill_locks) == 1 assert not single_column_df_data(df).spillable del gb assert single_column_df_data(df).spillable def test_spilling_buffer(manager: SpillManager): buf = as_buffer(rmm.DeviceBuffer(size=10), exposed=False) buf.spill(target="cpu") assert buf.is_spilled buf.mark_exposed() # Expose pointer and trigger unspill assert not buf.is_spilled with pytest.raises(ValueError, match="unspillable buffer"): buf.spill(target="cpu") def test_environment_variables(monkeypatch): def reload_options(): # In order to enable monkey patching of the environment variables, # mark the global manager as uninitialized.
set_global_manager(None) cudf.core.buffer.spill_manager._global_manager_uninitialized = True importlib.reload(cudf.options) monkeypatch.setenv("CUDF_SPILL_ON_DEMAND", "off") monkeypatch.setenv("CUDF_SPILL", "off") reload_options() assert get_global_manager() is None monkeypatch.setenv("CUDF_SPILL", "on") reload_options() manager = get_global_manager() assert isinstance(manager, SpillManager) assert manager._spill_on_demand is False assert manager._device_memory_limit is None assert manager.statistics.level == 0 monkeypatch.setenv("CUDF_SPILL_DEVICE_LIMIT", "1000") reload_options() manager = get_global_manager() assert isinstance(manager, SpillManager) assert manager._device_memory_limit == 1000 assert manager.statistics.level == 0 monkeypatch.setenv("CUDF_SPILL_STATS", "1") reload_options() manager = get_global_manager() assert isinstance(manager, SpillManager) assert manager.statistics.level == 1 monkeypatch.setenv("CUDF_SPILL_STATS", "2") reload_options() manager = get_global_manager() assert isinstance(manager, SpillManager) assert manager.statistics.level == 2 def test_spill_device_memory(manager: SpillManager): df = single_column_df() assert spilled_and_unspilled(manager) == (0, gen_df_data_nbytes) manager.spill_device_memory(nbytes=1) assert spilled_and_unspilled(manager) == (gen_df_data_nbytes, 0) del df assert spilled_and_unspilled(manager) == (0, 0) df1 = single_column_df() df2 = single_column_df() manager.spill_device_memory(nbytes=1) assert single_column_df_data(df1).is_spilled assert not single_column_df_data(df2).is_spilled manager.spill_device_memory(nbytes=1) assert single_column_df_data(df1).is_spilled assert single_column_df_data(df2).is_spilled df3 = df1 + df2 assert not single_column_df_data(df1).is_spilled assert not single_column_df_data(df2).is_spilled assert not single_column_df_data(df3).is_spilled manager.spill_device_memory(nbytes=1) assert single_column_df_data(df1).is_spilled assert not single_column_df_data(df2).is_spilled assert not single_column_df_data(df3).is_spilled df2.abs() # Should change the access time manager.spill_device_memory(nbytes=1) assert single_column_df_data(df1).is_spilled assert not single_column_df_data(df2).is_spilled assert single_column_df_data(df3).is_spilled def test_spill_to_device_limit(manager: SpillManager): df1 = single_column_df() df2 = single_column_df() assert spilled_and_unspilled(manager) == (0, gen_df_data_nbytes * 2) manager.spill_to_device_limit(device_limit=0) assert spilled_and_unspilled(manager) == (gen_df_data_nbytes * 2, 0) df3 = df1 + df2 manager.spill_to_device_limit(device_limit=0) assert spilled_and_unspilled(manager) == (gen_df_data_nbytes * 3, 0) assert single_column_df_data(df1).is_spilled assert single_column_df_data(df2).is_spilled assert single_column_df_data(df3).is_spilled @pytest.mark.parametrize( "manager", [{"device_memory_limit": 0}], indirect=True ) def test_zero_device_limit(manager: SpillManager): assert manager._device_memory_limit == 0 df1 = single_column_df() df2 = single_column_df() assert spilled_and_unspilled(manager) == (gen_df_data_nbytes * 2, 0) df1 + df2 # Notice, while performing the addition both df1 and df2 are unspillable assert spilled_and_unspilled(manager) == (0, gen_df_data_nbytes * 2) manager.spill_to_device_limit() assert spilled_and_unspilled(manager) == (gen_df_data_nbytes * 2, 0) def test_spill_df_index(manager: SpillManager): df = single_column_df() df.index = [1, 3, 2] # use a materialized index assert spilled_and_unspilled(manager) == (0, gen_df_data_nbytes * 2)
manager.spill_to_device_limit(gen_df_data_nbytes) assert spilled_and_unspilled(manager) == ( gen_df_data_nbytes, gen_df_data_nbytes, ) manager.spill_to_device_limit(0) assert spilled_and_unspilled(manager) == (gen_df_data_nbytes * 2, 0) def test_external_memory(manager): cupy.cuda.set_allocator() # uses default allocator cpy = cupy.asarray([1, 2, 3]) s = cudf.Series(cpy) # Check that the cupy array is still alive after overwriting `cpy` cpy = weakref.ref(cpy) assert cpy() is not None # Check that the series is spillable and known by the spill manager assert len(manager.buffers()) == 1 assert s._data[None].data.spillable def test_spilling_df_views(manager): df = single_column_df(target="cpu") assert single_column_df_data(df).is_spilled df_view = df.loc[1:] assert single_column_df_data(df_view).spillable assert single_column_df_data(df).spillable def test_modify_spilled_views(manager): df = single_column_df() df_view = df.iloc[1:] buf = single_column_df_data(df) buf.spill(target="cpu") # modify the spilled df and check that the changes are reflected # in the view df.iloc[1:] = 0 assert_eq(df_view, df.iloc[1:]) # now, modify the view and check that the changes are reflected in # the df df_view.iloc[:] = -1 assert_eq(df_view, df.iloc[1:]) @pytest.mark.parametrize("target", ["gpu", "cpu"]) def test_get_ptr(manager: SpillManager, target): if target == "gpu": mem = rmm.DeviceBuffer(size=10) elif target == "cpu": mem = np.empty(10, dtype="u1") buf = as_buffer(data=mem, exposed=False) assert buf.spillable assert len(buf._spill_locks) == 0 with acquire_spill_lock(): buf.get_ptr(mode="read") assert not buf.spillable with acquire_spill_lock(): buf.get_ptr(mode="read") assert not buf.spillable assert not buf.spillable assert buf.spillable def test_get_spill_lock(manager: SpillManager): @acquire_spill_lock() def f(sleep=False, nest=0): if sleep: time.sleep(random.random() / 100) if nest: return f(nest=nest - 1) return get_spill_lock() assert get_spill_lock() is None slock = f() assert isinstance(slock, SpillLock) assert get_spill_lock() is None slock = f(nest=2) assert isinstance(slock, SpillLock) assert get_spill_lock() is None with ThreadPoolExecutor(max_workers=2) as executor: futures_with_spill_lock = [] futures_without_spill_lock = [] for _ in range(100): futures_with_spill_lock.append( executor.submit(f, sleep=True, nest=1) ) futures_without_spill_lock.append( executor.submit(f, sleep=True, nest=1) ) all(isinstance(f.result(), SpillLock) for f in futures_with_spill_lock) all(f is None for f in futures_without_spill_lock) def test_get_spill_lock_no_manager(): """When spilling is disabled, get_spill_lock() should return None always""" @acquire_spill_lock() def f(): return get_spill_lock() assert get_spill_lock() is None assert f() is None @pytest.mark.parametrize("target", ["gpu", "cpu"]) @pytest.mark.parametrize("view", [None, slice(0, 2), slice(1, 3)]) def test_serialize_device(manager, target, view): df1 = single_column_df() if view is not None: df1 = df1.iloc[view] single_column_df_data(df1).spill(target=target) header, frames = df1.device_serialize() assert len(frames) == 1 if target == "gpu": assert isinstance(frames[0], Buffer) assert not single_column_df_data(df1).is_spilled assert not single_column_df_data(df1).spillable frames[0] = cupy.array(frames[0], copy=True) else: assert isinstance(frames[0], memoryview) assert single_column_df_data(df1).is_spilled assert single_column_df_data(df1).spillable df2 = Serializable.device_deserialize(header, frames) assert_eq(df1, df2) 
@pytest.mark.parametrize("target", ["gpu", "cpu"]) @pytest.mark.parametrize("view", [None, slice(0, 2), slice(1, 3)]) def test_serialize_host(manager, target, view): df1 = single_column_df() if view is not None: df1 = df1.iloc[view] single_column_df_data(df1).spill(target=target) # Unspilled df becomes spilled after host serialization header, frames = df1.host_serialize() assert all(isinstance(f, memoryview) for f in frames) df2 = Serializable.host_deserialize(header, frames) assert single_column_df_data(df2).is_spilled assert_eq(df1, df2) def test_serialize_dask_dataframe(manager: SpillManager): protocol = pytest.importorskip("distributed.protocol") df1 = single_column_df(target="gpu") header, frames = protocol.serialize( df1, serializers=("dask",), on_error="raise" ) buf = single_column_df_data(df1) assert len(frames) == 1 assert isinstance(frames[0], memoryview) # Check that the memoryview and frames are the same memory assert ( np.array(buf.memoryview()).__array_interface__["data"] == np.array(frames[0]).__array_interface__["data"] ) df2 = protocol.deserialize(header, frames) assert single_column_df_data(df2).is_spilled assert_eq(df1, df2) def test_serialize_cuda_dataframe(manager: SpillManager): protocol = pytest.importorskip("distributed.protocol") df1 = single_column_df(target="gpu") header, frames = protocol.serialize( df1, serializers=("cuda",), on_error="raise" ) buf: SpillableBufferSlice = single_column_df_data(df1) assert len(buf._base._spill_locks) == 1 assert len(frames) == 1 assert isinstance(frames[0], Buffer) assert frames[0].get_ptr(mode="read") == buf.get_ptr(mode="read") frames[0] = cupy.array(frames[0], copy=True) df2 = protocol.deserialize(header, frames) assert_eq(df1, df2) def test_get_rmm_memory_resource_stack(): mr1 = rmm.mr.get_current_device_resource() assert all( not isinstance(m, rmm.mr.FailureCallbackResourceAdaptor) for m in get_rmm_memory_resource_stack(mr1) ) mr2 = rmm.mr.FailureCallbackResourceAdaptor(mr1, lambda x: False) assert get_rmm_memory_resource_stack(mr2)[0] is mr2 assert get_rmm_memory_resource_stack(mr2)[1] is mr1 mr3 = rmm.mr.FixedSizeMemoryResource(mr2) assert get_rmm_memory_resource_stack(mr3)[0] is mr3 assert get_rmm_memory_resource_stack(mr3)[1] is mr2 assert get_rmm_memory_resource_stack(mr3)[2] is mr1 mr4 = rmm.mr.FailureCallbackResourceAdaptor(mr3, lambda x: False) assert get_rmm_memory_resource_stack(mr4)[0] is mr4 assert get_rmm_memory_resource_stack(mr4)[1] is mr3 assert get_rmm_memory_resource_stack(mr4)[2] is mr2 assert get_rmm_memory_resource_stack(mr4)[3] is mr1 def test_df_transpose(manager: SpillManager): df1 = cudf.DataFrame({"a": [1, 2]}) df2 = df1.transpose() # For now, all buffers are marked as exposed assert df1._data._data["a"].data.exposed assert df2._data._data[0].data.exposed assert df2._data._data[1].data.exposed def test_as_buffer_of_spillable_buffer(manager: SpillManager): data = cupy.arange(10, dtype="u1") b1 = as_buffer(data, exposed=False) assert isinstance(b1, SpillableBuffer) assert b1.owner is data b2 = as_buffer(b1) assert b1 is b2 with pytest.raises( ValueError, match="buffer must either be exposed or spilled locked", ): # Use `memory_info` to access the device pointer _without_ making # the buffer unspillable.
b3 = as_buffer(b1.memory_info()[0], size=b1.size, owner=b1) with acquire_spill_lock(): b3 = as_buffer(b1.get_ptr(mode="read"), size=b1.size, owner=b1) assert isinstance(b3, SpillableBufferSlice) assert b3.owner is b1 b4 = as_buffer( b1.get_ptr(mode="write") + data.itemsize, size=b1.size - data.itemsize, owner=b3, ) assert isinstance(b4, SpillableBufferSlice) assert b4.owner is b1 assert all(cupy.array(b4.memoryview()) == data[1:]) b5 = as_buffer(b4.get_ptr(mode="write"), size=b4.size - 1, owner=b4) assert isinstance(b5, SpillableBufferSlice) assert b5.owner is b1 assert all(cupy.array(b5.memoryview()) == data[1:-1]) @pytest.mark.parametrize("dtype", ["uint8", "uint64"]) def test_memoryview_slice(manager: SpillManager, dtype): """Check .memoryview() of a sliced spillable buffer""" data = np.arange(10, dtype=dtype) # memoryview of a sliced spillable buffer m1 = as_buffer(data=data)[1:-1].memoryview() # sliced memoryview of data as bytes m2 = memoryview(data).cast("B")[1:-1] assert m1 == m2 @pytest.mark.parametrize( "manager", [{"statistic_level": 0}, {"statistic_level": 1}], indirect=True ) def test_statistics(manager: SpillManager): assert len(manager.statistics.spill_totals) == 0 buf: SpillableBuffer = as_buffer( data=rmm.DeviceBuffer(size=10), exposed=False ) buf.spill(target="cpu") if manager.statistics.level == 0: assert len(manager.statistics.spill_totals) == 0 return assert len(manager.statistics.spill_totals) == 1 nbytes, time = manager.statistics.spill_totals[("gpu", "cpu")] assert nbytes == buf.size assert time > 0 buf.spill(target="gpu") assert len(manager.statistics.spill_totals) == 2 nbytes, time = manager.statistics.spill_totals[("cpu", "gpu")] assert nbytes == buf.size assert time > 0 @pytest.mark.parametrize("manager", [{"statistic_level": 2}], indirect=True) def test_statistics_expose(manager: SpillManager): assert len(manager.statistics.spill_totals) == 0 buffers: List[SpillableBuffer] = [ as_buffer(data=rmm.DeviceBuffer(size=10), exposed=False) for _ in range(10) ] # Expose the first buffer buffers[0].mark_exposed() assert len(manager.statistics.exposes) == 1 stat = list(manager.statistics.exposes.values())[0] assert stat.count == 1 assert stat.total_nbytes == buffers[0].nbytes assert stat.spilled_nbytes == 0 # Expose all 10 buffers for i in range(10): buffers[i].mark_exposed() # The rest of the ptr accesses should accumulate to a single stat # because they resolve to the same traceback. assert len(manager.statistics.exposes) == 2 stat = list(manager.statistics.exposes.values())[1] assert stat.count == 9 assert stat.total_nbytes == buffers[0].nbytes * 9 assert stat.spilled_nbytes == 0 # Create and spill 10 new buffers buffers: List[SpillableBuffer] = [ as_buffer(data=rmm.DeviceBuffer(size=10), exposed=False) for _ in range(10) ] manager.spill_to_device_limit(0) # Expose the new buffers and check that they are counted as spilled for i in range(10): buffers[i].mark_exposed() assert len(manager.statistics.exposes) == 3 stat = list(manager.statistics.exposes.values())[2] assert stat.count == 10 assert stat.total_nbytes == buffers[0].nbytes * 10 assert stat.spilled_nbytes == buffers[0].nbytes * 10
0
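# A minimal sketch of the spill round trip exercised by the tests above,
# restricted to APIs they import (SpillManager, set_global_manager,
# as_buffer). It assumes spilling is not already enabled globally
# (e.g. via CUDF_SPILL=on), mirroring the module-level skip above.
import rmm

from cudf.core.buffer import as_buffer
from cudf.core.buffer.spill_manager import SpillManager, set_global_manager

set_global_manager(manager=SpillManager())  # enable spilling for this process
buf = as_buffer(rmm.DeviceBuffer(size=10), exposed=False)
buf.spill(target="cpu")  # copy the buffer to host memory
assert buf.is_spilled
buf.spill(target="gpu")  # move it back to device memory
assert not buf.is_spilled
set_global_manager(manager=None)  # tear the manager down again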
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_dtypes.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION. import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf from cudf.core._compat import PANDAS_GE_150 from cudf.core.column import ColumnBase from cudf.core.dtypes import ( CategoricalDtype, Decimal32Dtype, Decimal64Dtype, Decimal128Dtype, IntervalDtype, ListDtype, StructDtype, ) from cudf.testing._utils import assert_eq from cudf.utils.dtypes import np_to_pa_dtype if PANDAS_GE_150: from pandas.core.arrays.arrow.extension_types import ArrowIntervalType else: from pandas.core.arrays._arrow_utils import ArrowIntervalType def test_cdt_basic(): psr = pd.Series(["a", "b", "a", "c"], dtype="category") sr = cudf.Series(["a", "b", "a", "c"], dtype="category") assert isinstance(sr.dtype, CategoricalDtype) assert_eq(sr.dtype.categories, psr.dtype.categories) @pytest.mark.parametrize( "data", [None, [], ["a"], [1], [1.0], ["a", "b", "c"]] ) @pytest.mark.parametrize("ordered", [None, False, True]) def test_cdt_eq(data, ordered): dt = cudf.CategoricalDtype(categories=data, ordered=ordered) assert dt == "category" assert dt == dt assert dt == cudf.CategoricalDtype(categories=None, ordered=ordered) assert dt == cudf.CategoricalDtype(categories=data, ordered=ordered) assert not dt == cudf.CategoricalDtype( categories=data, ordered=not ordered ) @pytest.mark.parametrize( "data", [None, [], ["a"], [1], [1.0], ["a", "b", "c"]] ) @pytest.mark.parametrize("ordered", [None, False, True]) def test_cdf_to_pandas(data, ordered): assert ( pd.CategoricalDtype(data, ordered) == cudf.CategoricalDtype(categories=data, ordered=ordered).to_pandas() ) @pytest.mark.parametrize( "value_type", [ int, "int32", np.int32, "datetime64[ms]", "datetime64[ns]", "str", "object", ], ) def test_list_dtype_pyarrow_round_trip(value_type): pa_type = pa.list_(cudf.utils.dtypes.np_to_pa_dtype(np.dtype(value_type))) expect = pa_type got = ListDtype.from_arrow(expect).to_arrow() assert expect.equals(got) def test_list_dtype_eq(): lhs = ListDtype("int32") rhs = ListDtype("int32") assert lhs == rhs rhs = ListDtype("int64") assert lhs != rhs def test_list_nested_dtype(): dt = ListDtype(ListDtype("int32")) expect = ListDtype("int32") got = dt.element_type assert expect == got @pytest.mark.parametrize( "fields", [ {}, {"a": "int64"}, {"a": "datetime64[ms]"}, {"a": "int32", "b": "int64"}, ], ) def test_struct_dtype_pyarrow_round_trip(fields): pa_type = pa.struct( { k: cudf.utils.dtypes.np_to_pa_dtype(np.dtype(v)) for k, v in fields.items() } ) expect = pa_type got = StructDtype.from_arrow(expect).to_arrow() assert expect.equals(got) def test_struct_dtype_eq(): lhs = StructDtype( {"a": "int32", "b": StructDtype({"c": "int64", "ab": "int32"})} ) rhs = StructDtype( {"a": "int32", "b": StructDtype({"c": "int64", "ab": "int32"})} ) assert lhs == rhs rhs = StructDtype({"a": "int32", "b": "int64"}) assert lhs != rhs lhs = StructDtype({"b": "int64", "a": "int32"}) assert lhs != rhs @pytest.mark.parametrize( "fields", [ {}, {"a": "int32"}, {"a": "object"}, {"a": "str"}, {"a": "datetime64[D]"}, {"a": "int32", "b": "int64"}, {"a": "int32", "b": StructDtype({"a": "int32", "b": "int64"})}, ], ) def test_struct_dtype_fields(fields): fields = {"a": "int32", "b": StructDtype({"c": "int64", "d": "int32"})} dt = StructDtype(fields) assert_eq(dt.fields, fields) @pytest.mark.parametrize( "decimal_type", [cudf.Decimal32Dtype, cudf.Decimal64Dtype, cudf.Decimal128Dtype], ) def test_decimal_dtype_arrow_roundtrip(decimal_type): dt = decimal_type(4, 2) assert dt.to_arrow() == pa.decimal128(4, 2) 
assert dt == decimal_type.from_arrow(pa.decimal128(4, 2)) @pytest.mark.parametrize( "decimal_type,max_precision", [ (cudf.Decimal32Dtype, 9), (cudf.Decimal64Dtype, 18), (cudf.Decimal128Dtype, 38), ], ) def test_max_precision(decimal_type, max_precision): decimal_type(scale=0, precision=max_precision) with pytest.raises(ValueError): decimal_type(scale=0, precision=max_precision + 1) @pytest.fixture(params=["int64", "int32"]) def subtype(request): return request.param @pytest.fixture(params=["left", "right", "both", "neither"]) def closed(request): return request.param def test_interval_dtype_pyarrow_round_trip(subtype, closed): pa_array = ArrowIntervalType(subtype, closed) expect = pa_array got = IntervalDtype.from_arrow(expect).to_arrow() assert expect.equals(got) def test_interval_dtype_from_pandas(subtype, closed): expect = cudf.IntervalDtype(subtype, closed=closed) pd_type = pd.IntervalDtype(subtype, closed=closed) got = cudf.IntervalDtype.from_pandas(pd_type) assert expect == got def assert_column_array_dtype_equal(column: ColumnBase, array: pa.array): """ In cudf, each column holds its dtype, and since a column may have child columns, those child columns also hold their own dtypes. This method tests that every level of `column` matches the type of the given `array` recursively. """ if isinstance(column.dtype, ListDtype): return array.type.equals( column.dtype.to_arrow() ) and assert_column_array_dtype_equal( column.base_children[1], array.values ) elif isinstance(column.dtype, StructDtype): return array.type.equals(column.dtype.to_arrow()) and all( assert_column_array_dtype_equal(child, array.field(i)) for i, child in enumerate(column.base_children) ) elif isinstance( column.dtype, (Decimal128Dtype, Decimal64Dtype, Decimal32Dtype) ): return array.type.equals(column.dtype.to_arrow()) elif isinstance(column.dtype, CategoricalDtype): raise NotImplementedError() else: return array.type.equals(np_to_pa_dtype(column.dtype)) @pytest.mark.parametrize( "data", [ [[{"name": 123}]], [ [ { "IsLeapYear": False, "data": {"Year": 1999, "Month": 7}, "names": ["Mike", None], }, { "IsLeapYear": True, "data": {"Year": 2004, "Month": 12}, "names": None, }, { "IsLeapYear": False, "data": {"Year": 1996, "Month": 2}, "names": ["Rose", "Richard"], }, ] ], [ [None, {"human?": True, "deets": {"weight": 2.4, "age": 27}}], [ {"human?": None, "deets": {"weight": 5.3, "age": 25}}, {"human?": False, "deets": {"weight": 8.0, "age": 31}}, {"human?": False, "deets": None}, ], [], None, [{"human?": None, "deets": {"weight": 6.9, "age": None}}], ], [ { "name": "var0", "val": [ {"name": "var1", "val": None, "type": "optional<struct>"} ], "type": "list", }, {}, { "name": "var2", "val": [ { "name": "var3", "val": {"field": 42}, "type": "optional<struct>", }, { "name": "var4", "val": {"field": 3.14}, "type": "optional<struct>", }, ], "type": "list", }, None, ], ], ) def test_lists_of_structs_dtype(data): got = cudf.Series(data) expected = pa.array(data) assert_column_array_dtype_equal(got._column, expected) assert expected.equals(got._column.to_arrow()) @pytest.mark.parametrize( "in_dtype,expect", [ (np.dtype("int8"), np.dtype("int8")), (np.int8, np.dtype("int8")), (pd.Int8Dtype(), np.dtype("int8")), (pd.StringDtype(), np.dtype("object")), ("int8", np.dtype("int8")), ("boolean", np.dtype("bool")), ("bool_", np.dtype("bool")), (np.bool_, np.dtype("bool")), (int, np.dtype("int64")), (float, np.dtype("float64")), (cudf.ListDtype("int64"), cudf.ListDtype("int64")), (np.dtype("U"), np.dtype("object")), ("timedelta64[ns]",
np.dtype("<m8[ns]")), ("timedelta64[ms]", np.dtype("<m8[ms]")), ("<m8[s]", np.dtype("<m8[s]")), ("datetime64[ns]", np.dtype("<M8[ns]")), ("datetime64[ms]", np.dtype("<M8[ms]")), ("<M8[s]", np.dtype("<M8[s]")), (cudf.ListDtype("int64"), cudf.ListDtype("int64")), ("category", cudf.CategoricalDtype()), ( cudf.CategoricalDtype(categories=("a", "b", "c")), cudf.CategoricalDtype(categories=("a", "b", "c")), ), ( pd.CategoricalDtype(categories=("a", "b", "c")), cudf.CategoricalDtype(categories=("a", "b", "c")), ), ( # this is a pandas.core.arrays.numpy_.PandasDtype... pd.array([1], dtype="int16").dtype, np.dtype("int16"), ), (pd.IntervalDtype("int"), cudf.IntervalDtype("int64")), (cudf.IntervalDtype("int"), cudf.IntervalDtype("int64")), (pd.IntervalDtype("int64"), cudf.IntervalDtype("int64")), ], ) def test_dtype(in_dtype, expect): assert_eq(cudf.dtype(in_dtype), expect) @pytest.mark.parametrize( "in_dtype", [ "complex", np.complex128, complex, "S", "a", "V", "float16", np.float16, "timedelta64", "timedelta64[D]", "datetime64[D]", "datetime64", ], ) def test_dtype_raise(in_dtype): with pytest.raises(TypeError): cudf.dtype(in_dtype) def test_dtype_np_bool_to_pa_bool(): """This test case captures that the utility np_to_pa_dtype should map np.bool_ to pa.bool_; nuances of the bit-width difference should be handled elsewhere. """ assert np_to_pa_dtype(np.dtype("bool")) == pa.bool_()
0
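# A minimal sketch of the Arrow round trips the dtype tests above rely on,
# restricted to calls they exercise (ListDtype/StructDtype from_arrow and
# to_arrow); the concrete element types chosen here are arbitrary.
import pyarrow as pa

from cudf.core.dtypes import ListDtype, StructDtype

list_dt = ListDtype.from_arrow(pa.list_(pa.int32()))
assert list_dt.to_arrow().equals(pa.list_(pa.int32()))

struct_dt = StructDtype({"a": "int32", "b": "int64"})
assert struct_dt.to_arrow().equals(
    pa.struct({"a": pa.int32(), "b": pa.int64()})
)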
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_orc.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION. import datetime import decimal import os import random from io import BytesIO from string import ascii_lowercase import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf from cudf.io.orc import ORCWriter from cudf.testing import assert_frame_equal from cudf.testing._utils import ( assert_eq, expect_warning_if, gen_rand_series, supported_numpy_dtypes, ) # Removal of these deprecated features is no longer imminent. They will not be # removed until a suitable alternative has been implemented. As a result, we # also do not want to stop testing them yet. # https://github.com/rapidsai/cudf/issues/11519 pytestmark = pytest.mark.filterwarnings( "ignore:(num_rows|skiprows) is deprecated and will be removed." ) @pytest.fixture(scope="module") def datadir(datadir): return datadir / "orc" @pytest.fixture def path_or_buf(datadir): fname = datadir / "TestOrcFile.test1.orc" try: with open(fname, "rb") as f: buffer = BytesIO(f.read()) except Exception as excpr: if type(excpr).__name__ == "FileNotFoundError": pytest.skip(".orc file is not found") else: print(type(excpr).__name__) def _make_path_or_buf(src): if src == "filepath": return str(fname) if src == "pathobj": return fname if src == "bytes_io": return buffer if src == "bytes": return buffer.getvalue() if src == "url": return fname.as_uri() raise ValueError("Invalid source type") yield _make_path_or_buf @pytest.mark.filterwarnings("ignore:Using CPU") @pytest.mark.parametrize("engine", ["pyarrow", "cudf"]) @pytest.mark.parametrize("use_index", [False, True]) @pytest.mark.parametrize( "inputfile, columns", [ ("TestOrcFile.emptyFile.orc", ["boolean1"]), ( "TestOrcFile.test1.orc", [ "boolean1", "byte1", "short1", "int1", "long1", "float1", "double1", ], ), ("TestOrcFile.RLEv2.orc", ["x", "y"]), ("TestOrcFile.testSnappy.orc", None), ("TestOrcFile.demo-12-zlib.orc", ["_col2", "_col3", "_col4", "_col5"]), ], ) def test_orc_reader_basic(datadir, inputfile, columns, use_index, engine): path = datadir / inputfile expect = pd.read_orc(path, columns=columns) got = cudf.read_orc( path, engine=engine, columns=columns, use_index=use_index ) assert_frame_equal(cudf.from_pandas(expect), got, check_categorical=False) def test_orc_reader_filenotfound(tmpdir): with pytest.raises(FileNotFoundError): cudf.read_orc("TestMissingFile.orc") with pytest.raises(FileNotFoundError): cudf.read_orc(tmpdir.mkdir("cudf_orc")) def test_orc_reader_local_filepath(): path = "~/TestLocalFile.orc" if not os.path.isfile(path): pytest.skip("Local .orc file is not found") cudf.read_orc(path) @pytest.mark.parametrize( "src", ["filepath", "pathobj", "bytes_io", "bytes", "url"] ) def test_orc_reader_filepath_or_buffer(path_or_buf, src): cols = ["int1", "long1", "float1", "double1"] expect = pd.read_orc(path_or_buf("filepath"), columns=cols) got = cudf.read_orc(path_or_buf(src), columns=cols) assert_eq(expect, got) def test_orc_reader_trailing_nulls(datadir): path = datadir / "TestOrcFile.nulls-at-end-snappy.orc" expect = pd.read_orc(path).fillna(0) got = cudf.read_orc(path).fillna(0) # pandas uses NaN to represent invalid data, which forces float dtype # For comparison, we can replace NaN with 0 and cast to the cuDF dtype for col in expect.columns: expect[col] = expect[col].astype(got[col].dtype) assert_eq(expect, got, check_categorical=False) @pytest.mark.parametrize("use_index", [False, True]) @pytest.mark.parametrize( "inputfile", ["TestOrcFile.testDate1900.orc", "TestOrcFile.testDate2038.orc"], ) def
test_orc_reader_datetimestamp(datadir, inputfile, use_index): from pyarrow import orc path = datadir / inputfile try: orcfile = orc.ORCFile(path) except pa.ArrowIOError as e: pytest.skip(".orc file is not found: %s" % e) pdf = orcfile.read().to_pandas(date_as_object=False) gdf = cudf.read_orc(path, use_index=use_index) assert_eq(pdf, gdf, check_categorical=False) def test_orc_reader_strings(datadir): path = datadir / "TestOrcFile.testStringAndBinaryStatistics.orc" expect = pd.read_orc(path, columns=["string1"]) got = cudf.read_orc(path, columns=["string1"]) assert_eq(expect, got, check_categorical=False) def test_orc_read_statistics(datadir): # Read in file containing 2 columns ("int1" and "string1") and 3 stripes # (sizes 5000, 5000 and 1000 respectively). Each stripe has the same value # in every one of its rows. The values the stripes have are 1, 2, and 3 in # "int1" and "one", "two", and "three" in "string1". path = datadir / "TestOrcFile.testStripeLevelStats.orc" try: ( file_statistics, stripes_statistics, ) = cudf.io.orc.read_orc_statistics([path, path]) except pa.ArrowIOError as e: pytest.skip(".orc file is not found: %s" % e) # Check numberOfValues assert_eq(file_statistics[0]["int1"]["number_of_values"], 11_000) assert_eq( file_statistics[0]["int1"]["number_of_values"], sum( [ stripes_statistics[0]["int1"]["number_of_values"], stripes_statistics[1]["int1"]["number_of_values"], stripes_statistics[2]["int1"]["number_of_values"], ] ), ) assert_eq( stripes_statistics[1]["int1"]["number_of_values"], stripes_statistics[1]["string1"]["number_of_values"], ) assert_eq(stripes_statistics[2]["string1"]["number_of_values"], 1_000) # Check other statistics assert_eq(stripes_statistics[2]["string1"]["has_null"], False) assert_eq( file_statistics[0]["int1"]["minimum"], min( stripes_statistics[0]["int1"]["minimum"], stripes_statistics[1]["int1"]["minimum"], stripes_statistics[2]["int1"]["minimum"], ), ) assert_eq(file_statistics[0]["int1"]["minimum"], 1) assert_eq(file_statistics[0]["string1"]["minimum"], "one") @pytest.mark.filterwarnings("ignore:Using CPU") @pytest.mark.parametrize("engine", ["cudf", "pyarrow"]) @pytest.mark.parametrize( "predicate,expected_len", [ ([[("int1", "==", 1)]], 5000), ([[("int1", "<=", 2)]], 10000), ([[("int1", "==", -1)]], 0), ([[("int1", "in", range(3))]], 10000), ([[("int1", "in", {1, 3})]], 6000), ([[("int1", "not in", {1, 3})]], 5000), ], ) def test_orc_read_filtered(datadir, engine, predicate, expected_len): path = datadir / "TestOrcFile.testStripeLevelStats.orc" try: df_filtered = cudf.read_orc(path, engine=engine, filters=predicate) except pa.ArrowIOError as e: pytest.skip(".orc file is not found: %s" % e) # Assert # of rows after filtering assert len(df_filtered) == expected_len @pytest.mark.filterwarnings("ignore:Using CPU") @pytest.mark.parametrize("engine", ["cudf", "pyarrow"]) def test_orc_read_stripes(datadir, engine): path = datadir / "TestOrcFile.testDate1900.orc" try: pdf = cudf.read_orc(path, engine=engine) except pa.ArrowIOError as e: pytest.skip(".orc file is not found: %s" % e) num_rows, stripes, col_names = cudf.io.read_orc_metadata(path) # Read stripes one at a time gdf = [ cudf.read_orc(path, engine=engine, stripes=[[i]]) for i in range(stripes) ] gdf = cudf.concat(gdf).reset_index(drop=True) assert_eq(pdf, gdf, check_categorical=False, check_index_type=True) # Read stripes all at once gdf = cudf.read_orc( path, engine=engine, stripes=[[int(x) for x in range(stripes)]] ) assert_eq(pdf, gdf, check_categorical=False) # Read only some stripes gdf 
= cudf.read_orc(path, engine=engine, stripes=[[0, 1]]) assert_eq(gdf, pdf.head(25000)) gdf = cudf.read_orc(path, engine=engine, stripes=[[0, stripes - 1]]) assert_eq( gdf, cudf.concat([pdf.head(15000), pdf.tail(10000)], ignore_index=True), check_index_type=True, ) @pytest.mark.parametrize("num_rows", [1, 100, 3000]) @pytest.mark.parametrize("skiprows", [0, 1, 3000]) def test_orc_read_rows(datadir, skiprows, num_rows): path = datadir / "TestOrcFile.decimal.orc" pdf = pd.read_orc(path) gdf = cudf.read_orc(path, skiprows=skiprows, num_rows=num_rows) # Slice rows out of the whole dataframe for comparison as PyArrow doesn't # have an API to read a subsection of rows from the file pdf = pdf[skiprows : skiprows + num_rows] pdf = pdf.reset_index(drop=True) assert_eq(pdf, gdf) def test_orc_read_skiprows(): buff = BytesIO() df = pd.DataFrame( { "a": [ True, False, True, False, None, True, True, True, False, None, False, False, True, True, True, True, ] } ) df.to_orc(buff) # testing 10 skiprows due to a boolean specific bug fix that didn't # repro for other sizes of data skiprows = 10 expected = ( pd.read_orc(buff)[skiprows:].reset_index(drop=True).astype("bool") ) got = cudf.read_orc(buff, skiprows=skiprows) assert_eq(expected, got) def test_orc_reader_uncompressed_block(datadir): path = datadir / "uncompressed_snappy.orc" expect = pd.read_orc(path) got = cudf.read_orc(path) assert_eq(expect, got, check_categorical=False) def test_orc_reader_nodata_block(datadir): path = datadir / "nodata.orc" expect = pd.read_orc(path) got = cudf.read_orc(path, num_rows=1) assert_eq(expect, got, check_categorical=False) @pytest.mark.parametrize("compression", [None, "snappy"]) @pytest.mark.parametrize( "reference_file, columns", [ ( "TestOrcFile.test1.orc", [ "boolean1", "byte1", "short1", "int1", "long1", "float1", "double1", ], ), ("TestOrcFile.demo-12-zlib.orc", ["_col1", "_col3", "_col5"]), ], ) def test_orc_writer(datadir, tmpdir, reference_file, columns, compression): pdf_fname = datadir / reference_file gdf_fname = tmpdir.join("gdf.orc") expect = cudf.from_pandas(pd.read_orc(pdf_fname, columns=columns)) expect.to_orc(gdf_fname.strpath, compression=compression) got = cudf.from_pandas(pd.read_orc(gdf_fname, columns=columns)) assert_frame_equal(expect, got) @pytest.mark.parametrize("stats_freq", ["NONE", "STRIPE", "ROWGROUP"]) def test_orc_writer_statistics_frequency(datadir, tmpdir, stats_freq): reference_file = "TestOrcFile.demo-12-zlib.orc" pdf_fname = datadir / reference_file gdf_fname = tmpdir.join("gdf.orc") expect = cudf.from_pandas(pd.read_orc(pdf_fname)) expect.to_orc(gdf_fname.strpath, statistics=stats_freq) got = cudf.from_pandas(pd.read_orc(gdf_fname)) assert_frame_equal(expect, got) @pytest.mark.parametrize("stats_freq", ["NONE", "STRIPE", "ROWGROUP"]) def test_chunked_orc_writer_statistics_frequency(datadir, tmpdir, stats_freq): reference_file = "TestOrcFile.test1.orc" pdf_fname = datadir / reference_file gdf_fname = tmpdir.join("chunked_gdf.orc") columns = [ "boolean1", "byte1", "short1", "int1", "long1", "float1", "double1", ] pdf = pd.read_orc(pdf_fname, columns=columns) gdf = cudf.from_pandas(pdf) expect = pd.concat([pdf, pdf]).reset_index(drop=True) writer = ORCWriter(gdf_fname, statistics=stats_freq) writer.write_table(gdf) writer.write_table(gdf) writer.close() got = pd.read_orc(gdf_fname) assert_eq(expect, got) @pytest.mark.parametrize("compression", [None, "snappy"]) @pytest.mark.parametrize( "reference_file, columns", [ ( "TestOrcFile.test1.orc", [ "boolean1", "byte1", "short1", "int1", 
"long1", "float1", "double1", ], ), ("TestOrcFile.demo-12-zlib.orc", ["_col1", "_col3", "_col5"]), ], ) def test_chunked_orc_writer( datadir, tmpdir, reference_file, columns, compression ): pdf_fname = datadir / reference_file gdf_fname = tmpdir.join("chunked_gdf.orc") pdf = pd.read_orc(pdf_fname, columns=columns) gdf = cudf.from_pandas(pdf) expect = pd.concat([pdf, pdf]).reset_index(drop=True) writer = ORCWriter(gdf_fname, compression=compression) writer.write_table(gdf) writer.write_table(gdf) writer.close() got = pd.read_orc(gdf_fname, columns=columns) assert_frame_equal(cudf.from_pandas(expect), cudf.from_pandas(got)) @pytest.mark.parametrize( "dtypes", [ {"c": str, "a": int}, {"c": int, "a": str}, {"c": int, "a": str, "b": float}, {"c": str, "a": object}, ], ) def test_orc_writer_strings(tmpdir, dtypes): gdf_fname = tmpdir.join("gdf_strings.orc") expect = cudf.datasets.randomdata(nrows=10, dtypes=dtypes, seed=1) expect.to_orc(gdf_fname) got = pd.read_orc(gdf_fname) assert_eq(expect, got) @pytest.mark.parametrize( "dtypes", [ {"c": str, "a": int}, {"c": int, "a": str}, {"c": int, "a": str, "b": float}, {"c": str, "a": object}, ], ) def test_chunked_orc_writer_strings(tmpdir, dtypes): gdf_fname = tmpdir.join("chunked_gdf_strings.orc") gdf = cudf.datasets.randomdata(nrows=10, dtypes=dtypes, seed=1) pdf = gdf.to_pandas() expect = pd.concat([pdf, pdf]).reset_index(drop=True) writer = ORCWriter(gdf_fname) writer.write_table(gdf) writer.write_table(gdf) writer.close() got = pd.read_orc(gdf_fname) assert_eq(expect, got) def test_orc_writer_sliced(tmpdir): cudf_path = tmpdir.join("cudf.orc") df = pd.DataFrame() df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"]) df = cudf.from_pandas(df) df_select = df.iloc[1:3] df_select.to_orc(cudf_path) assert_eq(cudf.read_orc(cudf_path), df_select) @pytest.mark.parametrize( "orc_file", [ "TestOrcFile.decimal.orc", "TestOrcFile.decimal.same.values.orc", "TestOrcFile.decimal.multiple.values.orc", # For additional information take look at PR 7034 "TestOrcFile.decimal.runpos.issue.orc", ], ) def test_orc_reader_decimal_type(datadir, orc_file): file_path = datadir / orc_file pdf = pd.read_orc(file_path) df = cudf.read_orc(file_path) assert_eq(pdf, df) def test_orc_decimal_precision_fail(datadir): file_path = datadir / "TestOrcFile.int_decimal.precision_19.orc" # Shouldn't cause failure if decimal column is not chosen to be read. 
pdf = pd.read_orc(file_path, columns=["int"]) gdf = cudf.read_orc(file_path, columns=["int"]) assert_eq(pdf, gdf) # For additional information take a look at PR 6636 and 6702 @pytest.mark.parametrize( "orc_file", [ "TestOrcFile.boolean_corruption_PR_6636.orc", "TestOrcFile.boolean_corruption_PR_6702.orc", ], ) def test_orc_reader_boolean_type(datadir, orc_file): file_path = datadir / orc_file pdf = pd.read_orc(file_path) df = cudf.read_orc(file_path).to_pandas() assert_eq(pdf, df) def test_orc_reader_tzif_timestamps(datadir): # Contains timestamps in the range covered by the TZif file # Other datetime tests only cover "future" times path = datadir / "TestOrcFile.lima_timezone.orc" pdf = pd.read_orc(path) gdf = cudf.read_orc(path) assert_eq(pdf, gdf) def test_int_overflow(tmpdir): file_path = tmpdir.join("gdf_overflow.orc") # The number of rows and the large element trigger delta encoding num_rows = 513 df = cudf.DataFrame({"a": [None] * num_rows}, dtype="int32") df["a"][0] = 1024 * 1024 * 1024 df["a"][num_rows - 1] = 1 df.to_orc(file_path) assert_eq(cudf.read_orc(file_path), df) def normalized_equals(value1, value2): # need naive time object for numpy to convert to datetime64 if isinstance(value1, datetime.datetime): value1 = value1.replace(tzinfo=None) if isinstance(value2, datetime.datetime): value2 = value2.replace(tzinfo=None) if isinstance(value1, (datetime.datetime, np.datetime64)): value1 = np.datetime64(value1, "ms") if isinstance(value2, (datetime.datetime, np.datetime64)): value2 = np.datetime64(value2, "ms") # Compare integers with floats now if isinstance(value1, float) or isinstance(value2, float): return np.isclose(value1, value2) return value1 == value2 @pytest.mark.parametrize("stats_freq", ["STRIPE", "ROWGROUP"]) @pytest.mark.parametrize("nrows", [1, 100, 6000000]) def test_orc_write_statistics(tmpdir, datadir, nrows, stats_freq): from pyarrow import orc supported_stat_types = supported_numpy_dtypes + ["str"] # Can't write random bool columns until issue #6763 is fixed if nrows == 6000000: supported_stat_types.remove("bool") # Make a dataframe gdf = cudf.DataFrame( { "col_" + str(dtype): gen_rand_series(dtype, nrows, has_nulls=True) for dtype in supported_stat_types } ) fname = tmpdir.join("gdf.orc") # Write said dataframe to ORC with cuDF gdf.to_orc(fname.strpath, statistics=stats_freq) # Read back written ORC's statistics orc_file = orc.ORCFile(fname) ( file_stats, stripes_stats, ) = cudf.io.orc.read_orc_statistics([fname]) # check file stats for col in gdf: if "minimum" in file_stats[0][col]: stats_min = file_stats[0][col]["minimum"] if stats_min is not None: actual_min = gdf[col].min() assert normalized_equals(actual_min, stats_min) if "maximum" in file_stats[0][col]: stats_max = file_stats[0][col]["maximum"] if stats_max is not None: actual_max = gdf[col].max() assert normalized_equals(actual_max, stats_max) if "number_of_values" in file_stats[0][col]: stats_num_vals = file_stats[0][col]["number_of_values"] if stats_num_vals is not None: actual_num_vals = gdf[col].count() assert stats_num_vals == actual_num_vals # compare stripe statistics with actual min/max for stripe_idx in range(0, orc_file.nstripes): stripe = orc_file.read_stripe(stripe_idx) # pandas is unable to handle min/max of string col with nulls stripe_df = cudf.DataFrame(stripe.to_pandas()) for col in stripe_df: if "minimum" in stripes_stats[stripe_idx][col]: stats_min = stripes_stats[stripe_idx][col]["minimum"] if stats_min is not None: actual_min = stripe_df[col].min() assert normalized_equals(actual_min,
stats_min) if "maximum" in stripes_stats[stripe_idx][col]: stats_max = stripes_stats[stripe_idx][col]["maximum"] if stats_max is not None: actual_max = stripe_df[col].max() assert normalized_equals(actual_max, stats_max) if "number_of_values" in stripes_stats[stripe_idx][col]: stats_num_vals = stripes_stats[stripe_idx][col][ "number_of_values" ] if stats_num_vals is not None: actual_num_vals = stripe_df[col].count() assert stats_num_vals == actual_num_vals @pytest.mark.parametrize("stats_freq", ["STRIPE", "ROWGROUP"]) @pytest.mark.parametrize("nrows", [2, 100, 6000000]) def test_orc_chunked_write_statistics(tmpdir, datadir, nrows, stats_freq): from pyarrow import orc np.random.seed(0) supported_stat_types = supported_numpy_dtypes + ["str"] # Can't write random bool columns until issue #6763 is fixed if nrows == 6000000: supported_stat_types.remove("bool") gdf_fname = tmpdir.join("chunked_stats.orc") writer = ORCWriter(gdf_fname) max_char_length = 1000 if nrows < 10000 else 100 # Make a dataframe gdf = cudf.DataFrame( { "col_" + str(dtype): gen_rand_series( dtype, int(nrows / 2), has_nulls=True, low=0, high=max_char_length, ) for dtype in supported_stat_types } ) pdf1 = gdf.to_pandas() writer.write_table(gdf) # gdf is specifically being reused here to ensure the data is destroyed # before the next write_table call to ensure the data is persisted inside # write and no pointers are saved into the original table gdf = cudf.DataFrame( { "col_" + str(dtype): gen_rand_series( dtype, int(nrows / 2), has_nulls=True, low=0, high=max_char_length, ) for dtype in supported_stat_types } ) pdf2 = gdf.to_pandas() writer.write_table(gdf) writer.close() # pandas is unable to handle min/max of string col with nulls expect = cudf.DataFrame(pd.concat([pdf1, pdf2]).reset_index(drop=True)) # Read back written ORC's statistics orc_file = orc.ORCFile(gdf_fname) ( file_stats, stripes_stats, ) = cudf.io.orc.read_orc_statistics([gdf_fname]) # check file stats for col in expect: if "minimum" in file_stats[0][col]: stats_min = file_stats[0][col]["minimum"] if stats_min is not None: actual_min = expect[col].min() assert normalized_equals(actual_min, stats_min) if "maximum" in file_stats[0][col]: stats_max = file_stats[0][col]["maximum"] if stats_max is not None: actual_max = expect[col].max() assert normalized_equals(actual_max, stats_max) if "number_of_values" in file_stats[0][col]: stats_num_vals = file_stats[0][col]["number_of_values"] if stats_num_vals is not None: actual_num_vals = expect[col].count() assert stats_num_vals == actual_num_vals # compare stripe statistics with actual min/max for stripe_idx in range(0, orc_file.nstripes): stripe = orc_file.read_stripe(stripe_idx) # pandas is unable to handle min/max of string col with nulls stripe_df = cudf.DataFrame(stripe.to_pandas()) for col in stripe_df: if "minimum" in stripes_stats[stripe_idx][col]: stats_min = stripes_stats[stripe_idx][col]["minimum"] if stats_min is not None: actual_min = stripe_df[col].min() assert normalized_equals(actual_min, stats_min) if "maximum" in stripes_stats[stripe_idx][col]: stats_max = stripes_stats[stripe_idx][col]["maximum"] if stats_max is not None: actual_max = stripe_df[col].max() assert normalized_equals(actual_max, stats_max) if "number_of_values" in stripes_stats[stripe_idx][col]: stats_num_vals = stripes_stats[stripe_idx][col][ "number_of_values" ] if stats_num_vals is not None: actual_num_vals = stripe_df[col].count() assert stats_num_vals == actual_num_vals @pytest.mark.parametrize("nrows", [1, 100, 6000000]) def 
test_orc_write_bool_statistics(tmpdir, datadir, nrows): from pyarrow import orc # Make a dataframe gdf = cudf.DataFrame({"col_bool": gen_rand_series("bool", nrows)}) fname = tmpdir.join("gdf.orc") # Write said dataframe to ORC with cuDF gdf.to_orc(fname.strpath) # Read back written ORC's statistics orc_file = orc.ORCFile(fname) ( file_stats, stripes_stats, ) = cudf.io.orc.read_orc_statistics([fname]) # check file stats col = "col_bool" if "true_count" in file_stats[0][col]: stats_true_count = file_stats[0][col]["true_count"] actual_true_count = gdf[col].sum() assert normalized_equals(actual_true_count, stats_true_count) if "number_of_values" in file_stats[0][col]: stats_valid_count = file_stats[0][col]["number_of_values"] actual_valid_count = gdf[col].valid_count assert normalized_equals(actual_valid_count, stats_valid_count) # compare stripe statistics with actual min/max for stripe_idx in range(0, orc_file.nstripes): stripe = orc_file.read_stripe(stripe_idx) # pandas is unable to handle min/max of string col with nulls stripe_df = cudf.DataFrame(stripe.to_pandas()) if "true_count" in stripes_stats[stripe_idx][col]: actual_true_count = stripe_df[col].sum() stats_true_count = stripes_stats[stripe_idx][col]["true_count"] assert normalized_equals(actual_true_count, stats_true_count) if "number_of_values" in stripes_stats[stripe_idx][col]: actual_valid_count = stripe_df[col].valid_count stats_valid_count = stripes_stats[stripe_idx][col][ "number_of_values" ] assert normalized_equals(actual_valid_count, stats_valid_count) def test_orc_reader_gmt_timestamps(datadir): path = datadir / "TestOrcFile.gmt.orc" pdf = pd.read_orc(path) gdf = cudf.read_orc(path) assert_eq(pdf, gdf) def test_orc_bool_encode_fail(): np.random.seed(0) buffer = BytesIO() # Generate a boolean column longer than a single stripe fail_df = cudf.DataFrame({"col": gen_rand_series("bool", 600000)}) # Invalidate the first row in the second stripe to break encoding fail_df["col"][500000] = None # Should throw instead of generating a file that is incompatible # with other readers (see issue #6763) with pytest.raises(RuntimeError): fail_df.to_orc(buffer) # Generate a boolean column that fits into a single stripe okay_df = cudf.DataFrame({"col": gen_rand_series("bool", 500000)}) okay_df["col"][500000 - 1] = None # Invalid row is in the last row group of the stripe; # encoding is assumed to be correct okay_df.to_orc(buffer) # Also validate data pdf = pd.read_orc(buffer) assert_eq(okay_df.to_pandas(nullable=True), pdf) def test_nanoseconds_overflow(): buffer = BytesIO() # Use nanosecond values that take more than 32 bits to encode s = cudf.Series([710424008, -1338482640], dtype="datetime64[ns]") expected = cudf.DataFrame({"s": s}) expected.to_orc(buffer) cudf_got = cudf.read_orc(buffer) assert_eq(expected, cudf_got) pandas_got = pd.read_orc(buffer) assert_eq(expected, pandas_got) def test_empty_dataframe(): buffer = BytesIO() expected = cudf.DataFrame() expected.to_orc(buffer) # Raise error if column name is mentioned, but it doesn't exist. 
def test_empty_dataframe():
    buffer = BytesIO()
    expected = cudf.DataFrame()
    expected.to_orc(buffer)

    # Raise error if column name is mentioned, but it doesn't exist.
    with pytest.raises(RuntimeError):
        cudf.read_orc(buffer, columns=["a"])

    got_df = cudf.read_orc(buffer)
    expected_pdf = pd.read_orc(buffer)

    assert_eq(expected, got_df)
    assert_eq(expected_pdf, got_df)


@pytest.mark.parametrize(
    "data", [[None, ""], ["", None], [None, None], ["", ""]]
)
def test_empty_string_columns(data):
    buffer = BytesIO()

    expected = cudf.DataFrame({"string": data}, dtype="str")
    expected.to_orc(buffer)

    expected_pdf = pd.read_orc(buffer)
    got_df = cudf.read_orc(buffer)

    assert_eq(expected, got_df)
    assert_eq(
        expected_pdf,
        got_df.to_pandas(nullable=True)
        if expected_pdf["string"].dtype == pd.StringDtype()
        else got_df,
    )


@pytest.mark.parametrize("scale", [-3, 0, 3])
@pytest.mark.parametrize(
    "decimal_type",
    [cudf.Decimal32Dtype, cudf.Decimal64Dtype, cudf.Decimal128Dtype],
)
def test_orc_writer_decimal(tmpdir, scale, decimal_type):
    np.random.seed(0)
    fname = tmpdir / "decimal.orc"

    expected = cudf.DataFrame({"dec_val": gen_rand_series("i", 100)})
    expected["dec_val"] = expected["dec_val"].astype(decimal_type(7, scale))

    expected.to_orc(fname)

    got = pd.read_orc(fname)
    assert_eq(expected.to_pandas()["dec_val"], got["dec_val"])


@pytest.mark.parametrize("num_rows", [1, 100, 3000])
def test_orc_reader_multiple_files(datadir, num_rows):
    path = datadir / "TestOrcFile.testSnappy.orc"

    df_1 = pd.read_orc(path)
    df_2 = pd.read_orc(path)
    df = pd.concat([df_1, df_2], ignore_index=True)

    gdf = cudf.read_orc([path, path], num_rows=num_rows).to_pandas()

    # Slice rows out of the whole dataframe for comparison as PyArrow doesn't
    # have an API to read a subsection of rows from the file
    df = df[:num_rows]
    df = df.reset_index(drop=True)

    assert_eq(df, gdf)


def test_orc_reader_multi_file_single_stripe(datadir):
    path = datadir / "TestOrcFile.testSnappy.orc"

    # should raise an exception
    with pytest.raises(ValueError):
        cudf.read_orc([path, path], stripes=[0])


def test_orc_reader_multi_file_multi_stripe(datadir):
    path = datadir / "TestOrcFile.testStripeLevelStats.orc"
    gdf = cudf.read_orc([path, path], stripes=[[0, 1], [2]])
    pdf = pd.read_orc(path)
    assert_eq(pdf, gdf)


def test_orc_string_stream_offset_issue():
    size = 30000
    vals = {
        str(x): [decimal.Decimal(1)] * size if x != 0 else ["XYZ"] * size
        for x in range(0, 5)
    }
    df = cudf.DataFrame(vals)

    buffer = BytesIO()
    df.to_orc(buffer)

    assert_eq(df, cudf.read_orc(buffer))
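
# An illustrative sketch of the stripe-selection API used by the multi-file
# tests above: when a list of files is read, `stripes` takes one list of
# stripe indices per input file. The path argument is a placeholder.
def _demo_read_selected_stripes(path):
    # Read only the first stripe of a single file
    return cudf.read_orc([path], stripes=[[0]])
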
"struct_nests_list": struct_nests_list, } ) df.to_orc(buff, engine="pyarrow", engine_kwargs={"stripe_size": 1024}) return buff @pytest.fixture(scope="module") def list_struct_buff(): return generate_list_struct_buff() @pytest.mark.parametrize( "columns", [ None, ["lvl3_list", "list_nests_struct", "lvl2_struct", "struct_nests_list"], ["lvl2_struct", "lvl1_struct"], ], ) @pytest.mark.parametrize("num_rows", [0, 15, 1005, 10561, 100_000]) @pytest.mark.parametrize("use_index", [True, False]) def test_lists_struct_nests(columns, num_rows, use_index, list_struct_buff): from pyarrow import orc gdf = cudf.read_orc( list_struct_buff, columns=columns, num_rows=num_rows, use_index=use_index, ) pyarrow_tbl = orc.ORCFile(list_struct_buff).read() pyarrow_tbl = ( pyarrow_tbl[:num_rows] if columns is None else pyarrow_tbl.select(columns)[:num_rows] ) if num_rows > 0: assert pyarrow_tbl.equals(gdf.to_arrow()) else: assert_eq(pyarrow_tbl.to_pandas(), gdf) @pytest.mark.parametrize("columns", [None, ["lvl1_struct"], ["lvl1_list"]]) def test_skip_rows_for_nested_types(columns, list_struct_buff): with pytest.raises( RuntimeError, match="skip_rows is not supported by nested column" ): cudf.read_orc( list_struct_buff, columns=columns, use_index=True, skiprows=5, ) def test_pyspark_struct(datadir): path = datadir / "TestOrcFile.testPySparkStruct.orc" pdf = pd.read_orc(path) gdf = cudf.read_orc(path) assert_eq(pdf, gdf) def gen_map_buff(size=10000): from string import ascii_letters as al from pyarrow import orc rd = random.Random(1) np.random.seed(seed=1) buff = BytesIO() lvl1_map = pa.array( [ rd.choice( [ None, { rd.choice(al): rd.choice( [None, np.random.randint(1, 1500)] ), }, ] ) for _ in range(size) ], type=pa.map_(pa.string(), pa.int64()), ) lvl2_map = pa.array( [ rd.choice( [ None, *( { rd.choice(al): rd.choice( [ None, [ rd.choice( [None, np.random.randint(1, 1500)] ) for _ in range(5) ], ] ) } for _ in range(2) ), ] ) for _ in range(size) ], type=pa.map_(pa.string(), pa.list_(pa.int64())), ) lvl2_struct_map = pa.array( [ rd.choice( [ None, *( { rd.choice(al): rd.choice( [ None, { "a": rd.choice( [None, np.random.randint(1, 1500)] ), "b": rd.choice( [None, np.random.randint(1, 1500)] ), }, ] ) } for _ in range(2) ), ] ) for _ in range(size) ], type=pa.map_( pa.string(), pa.struct({"a": pa.int64(), "b": pa.int64()}) ), ) pa_table = pa.Table.from_arrays( [lvl1_map, lvl2_map, lvl2_struct_map], ["lvl1_map", "lvl2_map", "lvl2_struct_map"], ) orc.write_table( pa_table, buff, stripe_size=1024, compression="UNCOMPRESSED" ) return buff map_buff = gen_map_buff(size=100000) @pytest.mark.parametrize( "columns", [None, ["lvl1_map", "lvl2_struct_map"], ["lvl2_struct_map", "lvl2_map"]], ) @pytest.mark.parametrize("num_rows", [0, 15, 1005, 10561, 100000]) @pytest.mark.parametrize("use_index", [True, False]) def test_map_type_read(columns, num_rows, use_index): from pyarrow import orc tbl = orc.read_table(map_buff) lvl1_map = ( tbl["lvl1_map"] .combine_chunks() .view(pa.list_(pa.struct({"key": pa.string(), "value": pa.int64()}))) ) lvl2_map = ( tbl["lvl2_map"] .combine_chunks() .view( pa.list_( pa.struct({"key": pa.string(), "value": pa.list_(pa.int64())}) ) ) ) lvl2_struct_map = ( tbl["lvl2_struct_map"] .combine_chunks() .view( pa.list_( pa.struct( { "key": pa.string(), "value": pa.struct({"a": pa.int64(), "b": pa.int64()}), } ) ) ) ) expected_tbl = pa.table( { "lvl1_map": lvl1_map, "lvl2_map": lvl2_map, "lvl2_struct_map": lvl2_struct_map, } ) gdf = cudf.read_orc( map_buff, columns=columns, num_rows=num_rows, 
@pytest.mark.parametrize(
    "columns",
    [None, ["lvl1_map", "lvl2_struct_map"], ["lvl2_struct_map", "lvl2_map"]],
)
@pytest.mark.parametrize("num_rows", [0, 15, 1005, 10561, 100000])
@pytest.mark.parametrize("use_index", [True, False])
def test_map_type_read(columns, num_rows, use_index):
    from pyarrow import orc

    tbl = orc.read_table(map_buff)

    lvl1_map = (
        tbl["lvl1_map"]
        .combine_chunks()
        .view(pa.list_(pa.struct({"key": pa.string(), "value": pa.int64()})))
    )
    lvl2_map = (
        tbl["lvl2_map"]
        .combine_chunks()
        .view(
            pa.list_(
                pa.struct({"key": pa.string(), "value": pa.list_(pa.int64())})
            )
        )
    )
    lvl2_struct_map = (
        tbl["lvl2_struct_map"]
        .combine_chunks()
        .view(
            pa.list_(
                pa.struct(
                    {
                        "key": pa.string(),
                        "value": pa.struct({"a": pa.int64(), "b": pa.int64()}),
                    }
                )
            )
        )
    )

    expected_tbl = pa.table(
        {
            "lvl1_map": lvl1_map,
            "lvl2_map": lvl2_map,
            "lvl2_struct_map": lvl2_struct_map,
        }
    )
    gdf = cudf.read_orc(
        map_buff, columns=columns, num_rows=num_rows, use_index=use_index
    )

    expected_tbl = (
        expected_tbl[:num_rows]
        if columns is None
        else expected_tbl.select(columns)[:num_rows]
    )

    if num_rows > 0:
        assert expected_tbl.equals(gdf.to_arrow())
    else:
        assert_eq(expected_tbl.to_pandas(), gdf)


def test_orc_reader_decimal(datadir):
    path = datadir / "TestOrcFile.decimal.orc"

    pdf = pd.read_orc(path)
    gdf = cudf.read_orc(path)

    assert_eq(pdf, gdf)


# This test case validates the issue raised in #8665,
# please check the issue for more details.
def test_orc_timestamp_read(datadir):
    path = datadir / "TestOrcFile.timestamp.issue.orc"

    pdf = pd.read_orc(path)
    gdf = cudf.read_orc(path)

    assert_eq(pdf, gdf)


def dec(num):
    return decimal.Decimal(str(num))


@pytest.mark.parametrize(
    "data",
    [
        # basic + nested strings
        {
            "lls": [[["a"], ["bb"]] * 5 for i in range(12345)],
            "lls2": [[["ccc", "dddd"]] * 6 for i in range(12345)],
            "ls_dict": [["X"] * 7 for i in range(12345)],
            "ls_direct": [[str(i)] * 9 for i in range(12345)],
            "li": [[i] * 11 for i in range(12345)],
            "lf": [[i * 0.5] * 13 for i in range(12345)],
            "ld": [[dec(i / 2)] * 15 for i in range(12345)],
        },
        # with nulls
        {
            "ls": [
                [str(i) if i % 5 else None, str(2 * i)] if i % 2 else None
                for i in range(12345)
            ],
            "li": [[i, i * i, i % 2] if i % 3 else None for i in range(12345)],
            "ld": [
                [dec(i), dec(i / 2) if i % 7 else None] if i % 5 else None
                for i in range(12345)
            ],
        },
        # with empty elements
        {
            "ls": [
                [str(i), str(2 * i)] if i % 2 else [] for i in range(12345)
            ],
            "lls": [
                [[str(i), str(2 * i)]] if i % 2 else [[], []]
                for i in range(12345)
            ],
            "li": [[i, i * i, i % 2] if i % 3 else [] for i in range(12345)],
            "lli": [
                [[i], [i * i], [i % 2]] if i % 3 else [[]]
                for i in range(12345)
            ],
            "ld": [
                [dec(i), dec(i / 2)] if i % 5 else [] for i in range(12345)
            ],
        },
        # variable list lengths
        {
            "ls": [[str(i)] * i for i in range(123)],
            "li": [[i, i * i] * i for i in range(123)],
            "ld": [[dec(i), dec(i / 2)] * i for i in range(123)],
        },
        # many child elements (more than max_stripe_rows)
        {"li": [[i] * 1100 for i in range(11000)]},
    ],
)
def test_orc_writer_lists(data):
    pdf_in = pd.DataFrame(data)

    buffer = BytesIO()
    cudf.from_pandas(pdf_in).to_orc(
        buffer, stripe_size_rows=2048, row_index_stride=512
    )

    pdf_out = pd.read_orc(buffer)
    assert_eq(pdf_out, pdf_in)


def test_chunked_orc_writer_lists():
    num_rows = 12345
    pdf_in = pd.DataFrame(
        {
            "ls": [[str(i), str(2 * i)] for i in range(num_rows)],
            "ld": [[dec(i / 2)] * 5 for i in range(num_rows)],
        }
    )

    gdf = cudf.from_pandas(pdf_in)
    expect = pd.concat([pdf_in, pdf_in]).reset_index(drop=True)

    buffer = BytesIO()
    writer = ORCWriter(buffer)
    writer.write_table(gdf)
    writer.write_table(gdf)
    writer.close()

    got = pd.read_orc(buffer)
    assert_eq(expect, got)


def test_writer_timestamp_stream_size(datadir, tmpdir):
    pdf_fname = datadir / "TestOrcFile.largeTimestamps.orc"
    gdf_fname = tmpdir.join("gdf.orc")

    expect = pd.read_orc(pdf_fname)
    cudf.from_pandas(expect).to_orc(gdf_fname.strpath)
    got = pd.read_orc(gdf_fname)

    assert_eq(expect, got)


@pytest.mark.parametrize(
    "fname",
    [
        "TestOrcFile.NoIndStrm.StructWithNoNulls.orc",
        "TestOrcFile.NoIndStrm.StructAndIntWithNulls.orc",
        "TestOrcFile.NoIndStrm.StructAndIntWithNulls.TwoStripes.orc",
        "TestOrcFile.NoIndStrm.IntWithNulls.orc",
    ],
)
def test_no_row_group_index_orc_read(datadir, fname):
    from pyarrow import orc

    fpath = datadir / fname

    expect = orc.ORCFile(fpath).read()
    got = cudf.read_orc(fpath)

    assert expect.equals(got.to_arrow())
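
# Illustrative sketch of the writer knobs exercised by test_orc_writer_lists
# above: smaller stripes and row groups force multiple row index entries even
# for modest inputs. The sizes here are placeholders.
def _demo_small_stripes(df):
    buf = BytesIO()
    df.to_orc(buf, stripe_size_rows=2048, row_index_stride=512)
    return buf
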
def test_names_in_struct_dtype_nesting(datadir):
    from pyarrow import orc

    fname = datadir / "TestOrcFile.NestedStructDataFrame.orc"

    expect = orc.ORCFile(fname).read()
    got = cudf.read_orc(fname)

    # test dataframes
    assert expect.equals(got.to_arrow())

    edf = cudf.DataFrame(expect.to_pandas())
    # test schema
    assert edf.dtypes.equals(got.dtypes)


def test_writer_lists_structs(list_struct_buff):
    from pyarrow import orc

    df_in = cudf.read_orc(list_struct_buff)

    buff = BytesIO()
    df_in.to_orc(buff)

    pyarrow_tbl = orc.ORCFile(buff).read()

    assert pyarrow_tbl.equals(df_in.to_arrow())


@pytest.mark.parametrize(
    "data",
    [
        {
            "with_pd": [
                [i if i % 3 else None] if i < 9999 or i > 20001 else None
                for i in range(21000)
            ],
            "no_pd": [
                [i if i % 3 else None] if i < 9999 or i > 20001 else []
                for i in range(21000)
            ],
        },
    ],
)
def test_orc_writer_lists_empty_rg(data):
    pdf_in = pd.DataFrame(data)
    buffer = BytesIO()
    cudf_in = cudf.from_pandas(pdf_in)

    cudf_in.to_orc(buffer)

    df = cudf.read_orc(buffer)
    assert_eq(df, cudf_in)

    pdf_out = pd.read_orc(buffer)
    assert_eq(pdf_in, pdf_out)


def test_statistics_sum_overflow():
    maxint64 = np.iinfo(np.int64).max
    minint64 = np.iinfo(np.int64).min

    buff = BytesIO()
    df = pd.DataFrame(
        {"a": [maxint64, 1], "b": [minint64, -1], "c": [minint64, 1]}
    )
    df.to_orc(buff)

    file_stats, stripe_stats = cudf.io.orc.read_orc_statistics([buff])
    assert file_stats[0]["a"].get("sum") is None
    assert file_stats[0]["b"].get("sum") is None
    assert file_stats[0]["c"].get("sum") == minint64 + 1

    assert stripe_stats[0]["a"].get("sum") is None
    assert stripe_stats[0]["b"].get("sum") is None
    assert stripe_stats[0]["c"].get("sum") == minint64 + 1


def test_empty_statistics():
    from pyarrow import orc

    buff = BytesIO()
    pa_table = pa.Table.from_arrays(
        [
            pa.array([None], type=pa.int64()),
            pa.array([None], type=pa.float64()),
            pa.array([None], type=pa.string()),
            pa.array([None], type=pa.decimal128(11, 2)),
            pa.array([None], type=pa.timestamp("ns")),
            pa.array([None], type=pa.date64()),
            pa.array([None], type=pa.bool_()),
            pa.array([None], type=pa.binary()),
            pa.array([1], type=pa.int64()),
        ],
        ["a", "b", "c", "d", "e", "f", "g", "h", "i"],
    )
    orc.write_table(pa_table, buff)

    got = cudf.io.orc.read_orc_statistics([buff])

    # Check for both file and stripe stats
    for stats in got:
        # Similar expected stats for the first 6 columns in this case
        for col_name in ascii_lowercase[:6]:
            assert stats[0][col_name].get("number_of_values") == 0
            assert stats[0][col_name].get("has_null") is True
            assert stats[0][col_name].get("minimum") is None
            assert stats[0][col_name].get("maximum") is None
        for col_name in ascii_lowercase[:3]:
            assert stats[0][col_name].get("sum") == 0
        # Sum for decimal column is a string
        assert stats[0]["d"].get("sum") == "0"

        assert stats[0]["g"].get("number_of_values") == 0
        assert stats[0]["g"].get("has_null") is True
        assert stats[0]["g"].get("true_count") == 0
        assert stats[0]["g"].get("false_count") == 0

        assert stats[0]["h"].get("number_of_values") == 0
        assert stats[0]["h"].get("has_null") is True
        assert stats[0]["h"].get("sum") == 0

        assert stats[0]["i"].get("number_of_values") == 1
        assert stats[0]["i"].get("has_null") is False
        assert stats[0]["i"].get("minimum") == 1
        assert stats[0]["i"].get("maximum") == 1
        assert stats[0]["i"].get("sum") == 1
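
# A sketch of the shape of read_orc_statistics() output relied on by the
# statistics tests above (assuming a single input file): it returns per-file
# and per-stripe statistics, each as a list of {column: {stat: value}} dicts.
def _demo_stats_shape(buf):
    file_stats, stripes_stats = cudf.io.orc.read_orc_statistics([buf])
    first_file = file_stats[0]  # column name -> stats dict
    first_stripe = stripes_stats[0]  # stats for stripe 0 of the first file
    return first_file, first_stripe
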
@pytest.mark.parametrize(
    "equivalent_columns",
    [
        (["lvl1_struct.a", "lvl1_struct.b"], ["lvl1_struct"]),
        (["lvl1_struct", "lvl1_struct.a"], ["lvl1_struct"]),
        (["lvl1_struct.a", "lvl1_struct"], ["lvl1_struct"]),
        (["lvl1_struct.b", "lvl1_struct.a"], ["lvl1_struct.b", "lvl1_struct"]),
        (["lvl2_struct.lvl1_struct", "lvl2_struct"], ["lvl2_struct"]),
        (
            ["lvl2_struct.a", "lvl2_struct.lvl1_struct.c", "lvl2_struct"],
            ["lvl2_struct"],
        ),
    ],
)
def test_select_nested(list_struct_buff, equivalent_columns):
    # The two column selections should be equivalent
    df_cols1 = cudf.read_orc(list_struct_buff, columns=equivalent_columns[0])
    df_cols2 = cudf.read_orc(list_struct_buff, columns=equivalent_columns[1])
    assert_eq(df_cols1, df_cols2)


def test_orc_writer_rle_stream_size(datadir, tmpdir):
    from pyarrow import orc

    original = datadir / "TestOrcFile.int16.rle.size.orc"
    reencoded = tmpdir.join("int16_map.orc")

    df = cudf.read_orc(original)
    df.to_orc(reencoded)

    # Segfaults when RLE stream sizes don't account for varint length
    pa_out = orc.ORCFile(reencoded).read()
    assert df.to_arrow().equals(pa_out)


def test_empty_columns():
    buffer = BytesIO()
    # string and decimal columns have additional steps that need to be skipped
    expected = cudf.DataFrame(
        {
            "string": cudf.Series([], dtype="str"),
            "decimal": cudf.Series([], dtype=cudf.Decimal64Dtype(10, 1)),
        }
    )
    expected.to_orc(buffer, compression="snappy")

    got_df = cudf.read_orc(buffer)
    assert_eq(expected, got_df)


def test_orc_reader_zstd_compression(list_struct_buff):
    from pyarrow import orc

    expected = cudf.read_orc(list_struct_buff)
    # save with ZSTD compression
    buffer = BytesIO()
    pyarrow_tbl = orc.ORCFile(list_struct_buff).read()
    writer = orc.ORCWriter(buffer, compression="zstd")
    writer.write(pyarrow_tbl)
    writer.close()
    try:
        got = cudf.read_orc(buffer)
        assert_eq(expected, got)
    except RuntimeError:
        # pytest.mark.xfail is a no-op at runtime; xfail the test explicitly
        pytest.xfail(reason="zstd support is not enabled")


def test_writer_protobuf_large_rowindexentry():
    s = [
        "Length of the two strings needs to add up to at least ~120",
        "So that the encoded statistics are larger than 128 bytes",
    ] * 5001  # generate more than 10K rows to have two row groups
    df = cudf.DataFrame({"s1": s})

    buff = BytesIO()
    df.to_orc(buff)

    got = cudf.read_orc(buff)
    assert_frame_equal(df, got)


@pytest.mark.parametrize("compression", ["ZLIB", "ZSTD"])
def test_orc_writer_nvcomp(compression):
    expected = cudf.datasets.randomdata(
        nrows=12345, dtypes={"a": int, "b": str, "c": float}, seed=1
    )

    buff = BytesIO()
    try:
        expected.to_orc(buff, compression=compression)
    except RuntimeError:
        # pytest.mark.xfail is a no-op at runtime; xfail the test explicitly
        pytest.xfail(reason="Newer nvCOMP version is required")
    else:
        got = pd.read_orc(buff)
        assert_eq(expected, got)


def run_orc_columns_and_index_param(index_obj, index, columns):
    buffer = BytesIO()
    df = cudf.DataFrame(
        {"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=index_obj
    )
    df.to_orc(buffer, index=index)

    expected = pd.read_orc(buffer, columns=columns)
    got = cudf.read_orc(buffer, columns=columns)

    if columns:
        # TODO: Remove workaround after this issue is fixed:
        # https://github.com/pandas-dev/pandas/issues/47944
        assert_eq(
            expected.sort_index(axis=1),
            got.sort_index(axis=1),
            check_index_type=True,
        )
    else:
        assert_eq(expected, got, check_index_type=True)


@pytest.mark.parametrize("index_obj", [None, [10, 11, 12], ["x", "y", "z"]])
@pytest.mark.parametrize("index", [True, False, None])
@pytest.mark.parametrize(
    "columns",
    [
        None,
        [],
    ],
)
def test_orc_columns_and_index_param(index_obj, index, columns):
    run_orc_columns_and_index_param(index_obj, index, columns)


@pytest.mark.parametrize(
    "columns,index,index_obj",
    [
        (
            ["a", "b"],
            True,
            None,
        ),
        (
            ["a", "b"],
            True,
            [10, 11, 12],
        ),
        (
            ["a", "b"],
            True,
            ["x", "y", "z"],
        ),
        (
            ["a", "b"],
            None,
            [10, 11, 12],
        ),
        (
            ["a", "b"],
            None,
            ["x", "y", "z"],
        ),
    ],
)
@pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/12026")
def test_orc_columns_and_index_param_read_index(index_obj, index, columns):
    run_orc_columns_and_index_param(index_obj, index, columns)
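
# A small sketch of the index round-trip behavior exercised by the
# parametrized tests around here: index=True writes the index as data,
# index=False drops it, and index=None leaves the decision to the writer's
# default behavior.
def _demo_orc_index_param(df):
    buf = BytesIO()
    df.to_orc(buf, index=True)
    return cudf.read_orc(buf)
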
"columns,index,index_obj", [ (["a", "b"], False, None), (["a", "b"], False, [10, 11, 12]), (["a", "b"], False, ["x", "y", "z"]), (["a", "b"], None, None), ], ) def test_orc_columns_and_index_param_no_read_index(index_obj, index, columns): run_orc_columns_and_index_param(index_obj, index, columns) @pytest.mark.parametrize( "df_data,cols_as_map_type,expected_data", [ ( {"a": [[{"a": 10, "b": 20}], [{"a": 1, "b": 21}]]}, ["a"], {"a": [[(10, 20)], [(1, 21)]]}, ), ( { "a": [[{"a": 10, "b": 20}], [{"a": 1, "b": 21}]], "b": [[{"a": 10, "b": 20}], [{"a": 1, "b": 21}]], }, ["b"], { "a": [[{"a": 10, "b": 20}], [{"a": 1, "b": 21}]], "b": [[(10, 20)], [(1, 21)]], }, ), ( { "a": [[{"a": 10, "b": 20}], [{"a": 1, "b": 21}]], "b": [[{"a": 10, "b": 20}], [{"a": 1, "b": 21}]], "c": [ [{"a": {"a": 10}, "b": 20}], [{"a": {"a": 12}, "b": 21}], ], }, ["b", "c"], { "a": [[{"a": 10, "b": 20}], [{"a": 1, "b": 21}]], "b": [[(10, 20)], [(1, 21)]], "c": [[({"a": 10}, 20)], [({"a": 12}, 21)]], }, ), ], ) def test_orc_writer_cols_as_map_type(df_data, cols_as_map_type, expected_data): df = cudf.DataFrame(df_data) buffer = BytesIO() df.to_orc(buffer, cols_as_map_type=cols_as_map_type) got = pd.read_orc(buffer) expected = pd.DataFrame(expected_data) assert_eq(got, expected) def test_orc_writer_cols_as_map_type_error(): df = cudf.DataFrame( {"a": cudf.Series([[{"a": 10, "b": 20}], [{"a": 1, "b": 21}]])} ) buffer = BytesIO() with pytest.raises( TypeError, match="cols_as_map_type must be a list of column names." ): df.to_orc(buffer, cols_as_map_type=1) @pytest.fixture def negative_timestamp_df(): return cudf.DataFrame( { "a": [ pd.Timestamp("1969-12-31 23:59:59.000123"), pd.Timestamp("1969-12-31 23:59:58.000999"), pd.Timestamp("1969-12-31 23:59:58.001001"), pd.Timestamp("1839-12-24 03:58:56.000826"), ] } ) @pytest.mark.parametrize("engine", ["cudf", "pyarrow"]) def test_orc_reader_negative_timestamp(negative_timestamp_df, engine): buffer = BytesIO() negative_timestamp_df.to_orc(buffer) # We warn the user that this function will fall back to the CPU for reading # when the engine is pyarrow. 
@pytest.fixture
def negative_timestamp_df():
    return cudf.DataFrame(
        {
            "a": [
                pd.Timestamp("1969-12-31 23:59:59.000123"),
                pd.Timestamp("1969-12-31 23:59:58.000999"),
                pd.Timestamp("1969-12-31 23:59:58.001001"),
                pd.Timestamp("1839-12-24 03:58:56.000826"),
            ]
        }
    )


@pytest.mark.parametrize("engine", ["cudf", "pyarrow"])
def test_orc_reader_negative_timestamp(negative_timestamp_df, engine):
    buffer = BytesIO()
    negative_timestamp_df.to_orc(buffer)

    # We warn the user that this function will fall back to the CPU for
    # reading when the engine is pyarrow.
    with expect_warning_if(engine == "pyarrow", UserWarning):
        got = cudf.read_orc(buffer, engine=engine)

    assert_eq(negative_timestamp_df, got)


def test_orc_writer_negative_timestamp(negative_timestamp_df):
    from pyarrow import orc

    buffer = BytesIO()
    negative_timestamp_df.to_orc(buffer)

    assert_eq(negative_timestamp_df, pd.read_orc(buffer))
    assert_eq(negative_timestamp_df, orc.ORCFile(buffer).read())


def test_orc_reader_apache_negative_timestamp(datadir):
    path = datadir / "TestOrcFile.apache_timestamp.orc"

    pdf = pd.read_orc(path)
    gdf = cudf.read_orc(path)

    assert_eq(pdf, gdf)


def test_statistics_string_sum():
    strings = ["a string", "another string!"]
    buff = BytesIO()
    df = cudf.DataFrame({"str": strings})
    df.to_orc(buff)

    file_stats, stripe_stats = cudf.io.orc.read_orc_statistics([buff])
    assert_eq(file_stats[0]["str"].get("sum"), sum(len(s) for s in strings))


@pytest.mark.parametrize(
    "fname",
    [
        "TestOrcFile.Hive.OneEmptyMap.orc",
        "TestOrcFile.Hive.OneEmptyList.orc",
        "TestOrcFile.Hive.OneNullStruct.orc",
        "TestOrcFile.Hive.EmptyListStripe.orc",
        "TestOrcFile.Hive.NullStructStripe.orc",
        "TestOrcFile.Hive.AllNulls.orc",
    ],
)
def test_reader_empty_stripe(datadir, fname):
    path = datadir / fname

    expected = pd.read_orc(path)
    got = cudf.read_orc(path)
    assert_eq(expected, got)


# needs enough data for multiple row groups
@pytest.mark.parametrize("data", [["*"] * 10001, ["**", None] * 5001])
def test_reader_row_index_order(data):
    expected = cudf.DataFrame({"str": data}, dtype="string")

    buffer = BytesIO()
    expected.to_pandas().to_orc(buffer)
    got = cudf.read_orc(buffer)
    assert_eq(expected, got)


# Test the corner case where empty blocks are compressed
# Decompressed data size is zero, even though compressed data size is non-zero
# For more information see https://github.com/rapidsai/cudf/issues/13608
def test_orc_reader_empty_decomp_data(datadir):
    path = datadir / "TestOrcFile.Spark.EmptyDecompData.orc"

    expect = pd.read_orc(path)
    got = cudf.read_orc(path)

    assert_eq(expect, got)


def test_orc_reader_empty_deeply_nested_level(datadir):
    # Test the case where top level struct has nulls, but the nested struct is
    # not nullable. In this case there is no data in the second level, but we
    # still need to pass the parent null mask to the third level.
    path = datadir / "TestOrcFile.Spark.NestedNotNullableStruct.orc"

    expect = pd.read_orc(path)
    got = cudf.read_orc(path)

    assert_eq(expect, got)
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_groupby.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

import collections
import datetime
import itertools
import operator
import string
import textwrap
from decimal import Decimal
from functools import partial

import numpy as np
import pandas as pd
import pytest
from numba import cuda
from numpy.testing import assert_array_equal

import rmm

import cudf
from cudf import DataFrame, Series
from cudf.core._compat import PANDAS_GE_150, PANDAS_LT_140
from cudf.core.udf.groupby_typing import SUPPORTED_GROUPBY_NUMPY_TYPES
from cudf.core.udf.utils import precompiled
from cudf.testing._utils import (
    DATETIME_TYPES,
    SIGNED_TYPES,
    TIMEDELTA_TYPES,
    assert_eq,
    assert_exceptions_equal,
    expect_warning_if,
)
from cudf.testing.dataset_generator import rand_dataframe

_now = np.datetime64("now")
_tomorrow = _now + np.timedelta64(1, "D")
_now = np.int64(_now.astype("datetime64[ns]"))
_tomorrow = np.int64(_tomorrow.astype("datetime64[ns]"))
_index_type_aggs = {"count", "idxmin", "idxmax", "cumcount"}


def assert_groupby_results_equal(
    expect, got, sort=True, as_index=True, by=None, **kwargs
):
    # Because we don't sort by index by default in groupby,
    # sort expect and got by index before comparing.
    if sort:
        if as_index:
            expect = expect.sort_index()
            got = got.sort_index()
        else:
            assert by is not None
            if isinstance(expect, (pd.DataFrame, cudf.DataFrame)):
                expect = expect.sort_values(by=by).reset_index(drop=True)
            else:
                expect = expect.sort_values().reset_index(drop=True)

            if isinstance(got, cudf.DataFrame):
                got = got.sort_values(by=by).reset_index(drop=True)
            else:
                got = got.sort_values().reset_index(drop=True)

    assert_eq(expect, got, **kwargs)


def make_frame(
    dataframe_class,
    nelem,
    seed=0,
    extra_levels=(),
    extra_vals=(),
    with_datetime=False,
):
    np.random.seed(seed)

    df = dataframe_class()

    df["x"] = np.random.randint(0, 5, nelem)
    df["y"] = np.random.randint(0, 3, nelem)
    for lvl in extra_levels:
        df[lvl] = np.random.randint(0, 2, nelem)

    df["val"] = np.random.random(nelem)
    for val in extra_vals:
        df[val] = np.random.random(nelem)

    if with_datetime:
        df["datetime"] = np.random.randint(
            _now, _tomorrow, nelem, dtype=np.int64
        ).astype("datetime64[ns]")

    return df


@pytest.fixture
def gdf():
    return DataFrame({"x": [1, 2, 3], "y": [0, 1, 1]})


@pytest.fixture
def pdf(gdf):
    return gdf.to_pandas()


@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
def test_groupby_mean(nelem):
    got_df = make_frame(DataFrame, nelem=nelem).groupby(["x", "y"]).mean()
    expect_df = (
        make_frame(pd.DataFrame, nelem=nelem).groupby(["x", "y"]).mean()
    )
    assert_groupby_results_equal(got_df, expect_df)


@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
def test_groupby_mean_3level(nelem):
    lvls = "z"
    bys = list("xyz")
    got_df = (
        make_frame(DataFrame, nelem=nelem, extra_levels=lvls)
        .groupby(bys)
        .mean()
    )
    expect_df = (
        make_frame(pd.DataFrame, nelem=nelem, extra_levels=lvls)
        .groupby(bys)
        .mean()
    )
    assert_groupby_results_equal(got_df, expect_df)


@pytest.mark.parametrize("nelem", [2, 3, 100, 1000])
def test_groupby_agg_mean_min(nelem):
    got_df = (
        make_frame(DataFrame, nelem=nelem)
        .groupby(["x", "y"])
        .agg(["mean", "min"])
    )
    expect_df = (
        make_frame(pd.DataFrame, nelem=nelem)
        .groupby(["x", "y"])
        .agg(["mean", "min"])
    )
    assert_groupby_results_equal(got_df, expect_df)
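
# A small sketch (illustrative data) of why assert_groupby_results_equal
# sorts before comparing: cuDF's hash-based groupby does not promise any
# particular row order unless sort=True is requested.
def _demo_groupby_row_order():
    gdf = cudf.DataFrame({"k": [2, 1, 2, 1], "v": [1, 2, 3, 4]})
    unordered = gdf.groupby("k").sum()  # row order not guaranteed
    ordered = gdf.groupby("k", sort=True).sum()  # sorted by key
    return unordered.sort_index(), ordered
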
.agg({"a": "min", "b": "max"}) ) assert_groupby_results_equal(expect_df, got_df) @pytest.mark.parametrize("nelem", [2, 3, 100, 1000]) def test_groupby_agg_min_max_dictlist(nelem): expect_df = ( make_frame(pd.DataFrame, nelem=nelem, extra_vals="ab") .groupby(["x", "y"]) .agg({"a": ["min", "max"], "b": ["min", "max"]}) ) got_df = ( make_frame(DataFrame, nelem=nelem, extra_vals="ab") .groupby(["x", "y"]) .agg({"a": ["min", "max"], "b": ["min", "max"]}) ) assert_groupby_results_equal(got_df, expect_df) @pytest.mark.parametrize("as_index", [True, False]) def test_groupby_as_index_single_agg(pdf, gdf, as_index): gdf = gdf.groupby("y", as_index=as_index).agg({"x": "mean"}) pdf = pdf.groupby("y", as_index=as_index).agg({"x": "mean"}) assert_groupby_results_equal(pdf, gdf) @pytest.mark.parametrize("engine", ["cudf", "jit"]) @pytest.mark.parametrize("as_index", [True, False]) def test_groupby_as_index_apply(pdf, gdf, as_index, engine): gdf = gdf.groupby("y", as_index=as_index).apply( lambda df: df["x"].mean(), engine=engine ) pdf = pdf.groupby("y", as_index=as_index).apply(lambda df: df["x"].mean()) assert_groupby_results_equal(pdf, gdf) @pytest.mark.parametrize("as_index", [True, False]) def test_groupby_as_index_multiindex(pdf, gdf, as_index): pdf = pd.DataFrame( {"a": [1, 2, 1], "b": [3, 3, 3], "c": [2, 2, 3], "d": [3, 1, 2]} ) gdf = cudf.from_pandas(pdf) gdf = gdf.groupby(["a", "b"], as_index=as_index, sort=True).agg( {"c": "mean"} ) pdf = pdf.groupby(["a", "b"], as_index=as_index, sort=True).agg( {"c": "mean"} ) if as_index: assert_eq(pdf, gdf) else: # column names don't match - check just the values for gcol, pcol in zip(gdf, pdf): assert_array_equal(gdf[gcol].to_numpy(), pdf[pcol].values) def test_groupby_default(pdf, gdf): gdf = gdf.groupby("y").agg({"x": "mean"}) pdf = pdf.groupby("y").agg({"x": "mean"}) assert_groupby_results_equal(pdf, gdf) def test_group_keys_true(pdf, gdf): gdf = gdf.groupby("y", group_keys=True).sum() pdf = pdf.groupby("y", group_keys=True).sum() assert_groupby_results_equal(pdf, gdf) @pytest.mark.parametrize("as_index", [True, False]) def test_groupby_getitem_getattr(as_index): pdf = pd.DataFrame({"x": [1, 3, 1], "y": [1, 2, 3], "z": [1, 4, 5]}) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("x", as_index=as_index)["y"].sum(), gdf.groupby("x", as_index=as_index)["y"].sum(), as_index=as_index, by="x", ) assert_groupby_results_equal( pdf.groupby("x", as_index=as_index).y.sum(), gdf.groupby("x", as_index=as_index).y.sum(), as_index=as_index, by="x", ) assert_groupby_results_equal( pdf.groupby("x", as_index=as_index)[["y"]].sum(), gdf.groupby("x", as_index=as_index)[["y"]].sum(), as_index=as_index, by="x", ) assert_groupby_results_equal( pdf.groupby(["x", "y"], as_index=as_index).sum(), gdf.groupby(["x", "y"], as_index=as_index).sum(), as_index=as_index, by=["x", "y"], ) def test_groupby_cats(): df = DataFrame() df["cats"] = pd.Categorical(list("aabaacaab")) df["vals"] = np.random.random(len(df)) cats = df["cats"].values_host vals = df["vals"].to_numpy() grouped = df.groupby(["cats"], as_index=False).mean() got_vals = grouped["vals"] got_cats = grouped["cats"] for i in range(len(got_vals)): expect = vals[cats == got_cats[i]].mean() np.testing.assert_almost_equal(got_vals[i], expect) def test_groupby_iterate_groups(): np.random.seed(0) df = DataFrame() nelem = 20 df["key1"] = np.random.randint(0, 3, nelem) df["key2"] = np.random.randint(0, 2, nelem) df["val1"] = np.random.random(nelem) df["val2"] = np.random.random(nelem) def 
def test_groupby_iterate_groups():
    np.random.seed(0)
    df = DataFrame()
    nelem = 20
    df["key1"] = np.random.randint(0, 3, nelem)
    df["key2"] = np.random.randint(0, 2, nelem)
    df["val1"] = np.random.random(nelem)
    df["val2"] = np.random.random(nelem)

    def assert_values_equal(arr):
        np.testing.assert_array_equal(arr[0], arr)

    for name, grp in df.groupby(["key1", "key2"]):
        pddf = grp.to_pandas()
        for k in "key1,key2".split(","):
            assert_values_equal(pddf[k].values)


def test_groupby_apply():
    np.random.seed(0)
    df = DataFrame()
    nelem = 20
    df["key1"] = np.random.randint(0, 3, nelem)
    df["key2"] = np.random.randint(0, 2, nelem)
    df["val1"] = np.random.random(nelem)
    df["val2"] = np.random.random(nelem)

    expect_grpby = df.to_pandas().groupby(
        ["key1", "key2"], as_index=False, group_keys=False
    )
    got_grpby = df.groupby(["key1", "key2"])

    def foo(df):
        df["out"] = df["val1"] + df["val2"]
        return df

    expect = expect_grpby.apply(foo)
    got = got_grpby.apply(foo)
    assert_groupby_results_equal(expect, got)


def create_test_groupby_apply_args_params():
    def f1(df, k):
        df["out"] = df["val1"] + df["val2"] + k
        return df

    def f2(df, k, L):
        df["out"] = df["val1"] - df["val2"] + (k / L)
        return df

    def f3(df, k, L, m):
        df["out"] = ((k * df["val1"]) + (L * df["val2"])) / m
        return df

    return [(f1, (42,)), (f2, (42, 119)), (f3, (42, 119, 212.1))]


@pytest.mark.parametrize("func,args", create_test_groupby_apply_args_params())
def test_groupby_apply_args(func, args):
    np.random.seed(0)
    df = DataFrame()
    nelem = 20
    df["key1"] = np.random.randint(0, 3, nelem)
    df["key2"] = np.random.randint(0, 2, nelem)
    df["val1"] = np.random.random(nelem)
    df["val2"] = np.random.random(nelem)

    expect_grpby = df.to_pandas().groupby(
        ["key1", "key2"], as_index=False, group_keys=False
    )
    got_grpby = df.groupby(["key1", "key2"])
    expect = expect_grpby.apply(func, *args)
    got = got_grpby.apply(func, *args)
    assert_groupby_results_equal(expect, got)


def test_groupby_apply_grouped():
    np.random.seed(0)
    df = DataFrame()
    nelem = 20
    df["key1"] = np.random.randint(0, 3, nelem)
    df["key2"] = np.random.randint(0, 2, nelem)
    df["val1"] = np.random.random(nelem)
    df["val2"] = np.random.random(nelem)

    expect_grpby = df.to_pandas().groupby(
        ["key1", "key2"], as_index=False, group_keys=False
    )
    got_grpby = df.groupby(["key1", "key2"])

    def foo(key1, val1, com1, com2):
        for i in range(cuda.threadIdx.x, len(key1), cuda.blockDim.x):
            com1[i] = key1[i] * 10000 + val1[i]
            com2[i] = i

    got = got_grpby.apply_grouped(
        foo,
        incols=["key1", "val1"],
        outcols={"com1": np.float64, "com2": np.int32},
        tpb=8,
    )

    got = got.to_pandas()

    # Get expected result by emulating the operation in pandas
    def emulate(df):
        df["com1"] = df.key1 * 10000 + df.val1
        df["com2"] = np.arange(len(df), dtype=np.int32)
        return df

    expect = expect_grpby.apply(emulate)
    expect = expect.sort_values(["key1", "key2"])

    assert_groupby_results_equal(expect, got)


@pytest.fixture(scope="module")
def groupby_jit_data():
    np.random.seed(0)
    df = DataFrame()
    nelem = 20
    df["key1"] = np.random.randint(0, 3, nelem)
    df["key2"] = np.random.randint(0, 2, nelem)
    df["val1"] = np.random.random(nelem)
    df["val2"] = np.random.random(nelem)
    df["val3"] = np.random.randint(0, 10, nelem)
    df["val4"] = np.random.randint(0, 10, nelem)
    return df


def run_groupby_apply_jit_test(data, func, keys, *args):
    expect_groupby_obj = data.to_pandas().groupby(keys)
    got_groupby_obj = data.groupby(keys)

    # compare cuDF jit to pandas
    cudf_jit_result = got_groupby_obj.apply(func, *args, engine="jit")
    pandas_result = expect_groupby_obj.apply(func, *args)
    assert_groupby_results_equal(cudf_jit_result, pandas_result)
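
# Minimal sketch of the JIT groupby-apply entry point the helper above
# exercises: engine="jit" compiles the UDF with Numba instead of running the
# iterative per-group path.
def _demo_groupby_apply_jit():
    gdf = cudf.DataFrame({"key": [0, 0, 1], "val": [1.0, 2.0, 3.0]})
    return gdf.groupby("key").apply(
        lambda group: group["val"].mean(), engine="jit"
    )
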
@pytest.mark.parametrize(
    "dtype",
    SUPPORTED_GROUPBY_NUMPY_TYPES,
    ids=[str(t) for t in SUPPORTED_GROUPBY_NUMPY_TYPES],
)
@pytest.mark.parametrize(
    "func", ["min", "max", "sum", "mean", "var", "std", "idxmin", "idxmax"]
)
def test_groupby_apply_jit_reductions(func, groupby_jit_data, dtype):
    # ideally we'd just have:
    # lambda group: getattr(group, func)()
    # but the current kernel caching mechanism relies on pickle which
    # does not play nice with local functions. What's below uses
    # exec as a workaround to write the test functions dynamically
    funcstr = textwrap.dedent(
        f"""
        def func(df):
            return df['val1'].{func}()
        """
    )
    lcl = {}
    exec(funcstr, lcl)
    func = lcl["func"]

    groupby_jit_data["val1"] = groupby_jit_data["val1"].astype(dtype)
    groupby_jit_data["val2"] = groupby_jit_data["val2"].astype(dtype)

    run_groupby_apply_jit_test(groupby_jit_data, func, ["key1"])


@pytest.mark.parametrize("dtype", ["int32", "int64"])
def test_groupby_apply_jit_correlation(groupby_jit_data, dtype):
    groupby_jit_data["val3"] = groupby_jit_data["val3"].astype(dtype)
    groupby_jit_data["val4"] = groupby_jit_data["val4"].astype(dtype)

    keys = ["key1", "key2"]

    def func(group):
        return group["val3"].corr(group["val4"])

    run_groupby_apply_jit_test(groupby_jit_data, func, keys)


@pytest.mark.parametrize("dtype", ["int32", "int64"])
def test_groupby_apply_jit_correlation_zero_variance(dtype):
    # pearson correlation is undefined when the variance of either
    # variable is zero. This test ensures that the jit implementation
    # returns the same result as pandas in this case.
    data = DataFrame(
        {"a": [0, 0, 0, 0, 0], "b": [1, 1, 1, 1, 1], "c": [2, 2, 2, 2, 2]}
    )

    def func(group):
        return group["b"].corr(group["c"])

    run_groupby_apply_jit_test(data, func, ["a"])


@pytest.mark.parametrize("dtype", ["int32"])
def test_groupby_apply_jit_sum_integer_overflow(dtype):
    max = np.iinfo(dtype).max

    data = DataFrame(
        {
            "a": [0, 0, 0],
            "b": [max, max, max],
        }
    )

    def func(group):
        return group["b"].sum()

    run_groupby_apply_jit_test(data, func, ["a"])


@pytest.mark.parametrize("dtype", ["float64"])
@pytest.mark.parametrize("func", ["min", "max", "sum", "mean", "var", "std"])
@pytest.mark.parametrize("special_val", [np.nan, np.inf, -np.inf])
def test_groupby_apply_jit_reductions_special_vals(
    func, groupby_jit_data, dtype, special_val
):
    # dynamically generate to avoid pickling error.
    # see test_groupby_apply_jit_reductions for details.
    funcstr = textwrap.dedent(
        f"""
        def func(df):
            return df['val1'].{func}()
        """
    )
    lcl = {}
    exec(funcstr, lcl)
    func = lcl["func"]

    groupby_jit_data["val1"] = special_val
    groupby_jit_data["val1"] = groupby_jit_data["val1"].astype(dtype)

    run_groupby_apply_jit_test(groupby_jit_data, func, ["key1"])
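
# Sketch of the exec-based workaround used in the JIT tests above: building
# the UDF from a string yields a function the pickle-based kernel cache can
# handle, unlike a closure over a local variable. `op` is any Series
# reduction name, e.g. "sum".
def _make_reduction_udf(op):
    src = textwrap.dedent(
        f"""
        def func(df):
            return df['val1'].{op}()
        """
    )
    namespace = {}
    exec(src, namespace)
    return namespace["func"]
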
@pytest.mark.parametrize("dtype", ["float64"])
@pytest.mark.parametrize("func", ["idxmax", "idxmin"])
@pytest.mark.parametrize(
    "special_val",
    [
        pytest.param(
            np.nan,
            marks=pytest.mark.xfail(
                reason="https://github.com/rapidsai/cudf/issues/13832"
            ),
        ),
        np.inf,
        -np.inf,
    ],
)
def test_groupby_apply_jit_idx_reductions_special_vals(
    func, groupby_jit_data, dtype, special_val
):
    # dynamically generate to avoid pickling error.
    # see test_groupby_apply_jit_reductions for details.
    funcstr = textwrap.dedent(
        f"""
        def func(df):
            return df['val1'].{func}()
        """
    )
    lcl = {}
    exec(funcstr, lcl)
    func = lcl["func"]

    groupby_jit_data["val1"] = special_val
    groupby_jit_data["val1"] = groupby_jit_data["val1"].astype(dtype)

    expect = groupby_jit_data.to_pandas().groupby("key1").apply(func)
    got = groupby_jit_data.groupby("key1").apply(func, engine="jit")

    assert_eq(expect, got, check_dtype=False)


@pytest.mark.parametrize(
    "func",
    [
        lambda df: df["val1"].max() + df["val2"].min(),
        lambda df: df["val1"].sum() + df["val2"].var(),
        lambda df: df["val1"].mean() + df["val2"].std(),
    ],
)
def test_groupby_apply_jit_basic(func, groupby_jit_data):
    run_groupby_apply_jit_test(groupby_jit_data, func, ["key1", "key2"])


def create_test_groupby_apply_jit_args_params():
    def f1(df, k):
        return df["val1"].max() + df["val2"].min() + k

    def f2(df, k, L):
        return df["val1"].sum() - df["val2"].var() + (k / L)

    def f3(df, k, L, m):
        return ((k * df["val1"].mean()) + (L * df["val2"].std())) / m

    return [(f1, (42,)), (f2, (42, 119)), (f3, (42, 119, 212.1))]


@pytest.mark.parametrize(
    "func,args", create_test_groupby_apply_jit_args_params()
)
def test_groupby_apply_jit_args(func, args, groupby_jit_data):
    run_groupby_apply_jit_test(groupby_jit_data, func, ["key1", "key2"], *args)


def test_groupby_apply_jit_block_divergence():
    # https://github.com/rapidsai/cudf/issues/12686
    df = cudf.DataFrame(
        {
            "a": [0, 0, 0, 1, 1, 1],
            "b": [1, 1, 1, 2, 3, 4],
        }
    )

    def diverging_block(grp_df):
        if grp_df["a"].mean() > 0:
            return grp_df["b"].mean()
        return 0

    run_groupby_apply_jit_test(df, diverging_block, ["a"])


def test_groupby_apply_caching():
    # Make sure similar functions that differ
    # by simple things like constants actually
    # recompile

    # begin with a clear cache
    precompiled.clear()
    assert precompiled.currsize == 0

    data = cudf.DataFrame({"a": [1, 1, 1, 2, 2, 2], "b": [1, 2, 3, 4, 5, 6]})

    def f(group):
        return group["b"].mean() * 2

    # a single run should result in a cache size of 1
    run_groupby_apply_jit_test(data, f, ["a"])
    assert precompiled.currsize == 1

    # a second run with f should not increase the count
    run_groupby_apply_jit_test(data, f, ["a"])
    assert precompiled.currsize == 1

    # changing a constant value inside the UDF should miss
    def f(group):
        return group["b"].mean() * 3

    run_groupby_apply_jit_test(data, f, ["a"])
    assert precompiled.currsize == 2

    # changing the dtypes of the columns should miss
    data["b"] = data["b"].astype("float64")
    run_groupby_apply_jit_test(data, f, ["a"])

    assert precompiled.currsize == 3
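
# Illustrative sketch of the UDF kernel cache inspected above: `precompiled`
# behaves like an LRU cache whose size can be checked and reset between
# compilations (typically 1 after a single compilation).
def _demo_kernel_cache_state():
    precompiled.clear()
    before = precompiled.currsize  # 0 after a clear
    gdf = cudf.DataFrame({"a": [0, 0, 1], "b": [1.0, 2.0, 3.0]})
    gdf.groupby("a").apply(lambda g: g["b"].mean(), engine="jit")
    return before, precompiled.currsize
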
gdf = cudf.DataFrame({"a": [0, 1, 1], "b": [1, 2, 3]}) pdf = gdf.to_pandas() def f(group): return group.sum() part = partial(f) expect = pdf.groupby("a").apply(part) got = gdf.groupby("a").apply(part, engine="auto") assert_groupby_results_equal(expect, got) @pytest.mark.parametrize("func", [lambda group: group.x + group.y]) def test_groupby_apply_return_col_from_df(func): # tests a UDF that consists of purely colwise # ops, such as `lambda group: group.x + group.y` # which returns a column df = cudf.datasets.randomdata() pdf = df.to_pandas() def func(df): return df.x + df.y expect = pdf.groupby("id").apply(func) got = df.groupby("id").apply(func) assert_groupby_results_equal(expect, got) @pytest.mark.parametrize("func", [lambda group: group.sum()]) def test_groupby_apply_return_df(func): # tests a UDF that reduces over a dataframe # and produces a series with the original column names # as its index, such as lambda group: group.sum() + group.min() df = cudf.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, 4]}) pdf = df.to_pandas() expect = pdf.groupby("a").apply(func) got = df.groupby("a").apply(func) assert_groupby_results_equal(expect, got) @pytest.mark.parametrize("nelem", [2, 3, 100, 500, 1000]) @pytest.mark.parametrize( "func", [ "mean", "std", "var", "min", "max", "idxmin", "idxmax", "count", "sum", "prod", ], ) def test_groupby_2keys_agg(nelem, func): # gdf (Note: lack of multiIndex) expect_df = ( make_frame(pd.DataFrame, nelem=nelem).groupby(["x", "y"]).agg(func) ) got_df = make_frame(DataFrame, nelem=nelem).groupby(["x", "y"]).agg(func) check_dtype = func not in _index_type_aggs assert_groupby_results_equal(got_df, expect_df, check_dtype=check_dtype) @pytest.mark.parametrize("num_groups", [2, 3, 10, 50, 100]) @pytest.mark.parametrize("nelem_per_group", [1, 10, 100]) @pytest.mark.parametrize( "func", ["min", "max", "count", "sum"], # TODO: Replace the above line with the one below once # https://github.com/pandas-dev/pandas/issues/40685 is resolved. # "func", ["min", "max", "idxmin", "idxmax", "count", "sum"], ) @pytest.mark.xfail( condition=PANDAS_LT_140, reason="https://github.com/pandas-dev/pandas/issues/43209", ) def test_groupby_agg_decimal(num_groups, nelem_per_group, func): # The number of digits after the decimal to use. decimal_digits = 2 # The number of digits before the decimal to use. whole_digits = 2 scale = 10**whole_digits nelem = num_groups * nelem_per_group # The unique is necessary because otherwise if there are duplicates idxmin # and idxmax may return different results than pandas (see # https://github.com/rapidsai/cudf/issues/7756). This is not relevant to # the current version of the test, because idxmin and idxmax simply don't # work with pandas Series composed of Decimal objects (see # https://github.com/pandas-dev/pandas/issues/40685). However, if that is # ever enabled, then this issue will crop up again so we may as well have # it fixed now. x = np.unique((np.random.rand(nelem) * scale).round(decimal_digits)) y = np.unique((np.random.rand(nelem) * scale).round(decimal_digits)) if x.size < y.size: total_elements = x.size y = y[: x.size] else: total_elements = y.size x = x[: y.size] # Note that this filtering can lead to one group with fewer elements, but # that shouldn't be a problem and is probably useful to test. 
@pytest.mark.parametrize("num_groups", [2, 3, 10, 50, 100])
@pytest.mark.parametrize("nelem_per_group", [1, 10, 100])
@pytest.mark.parametrize(
    "func",
    ["min", "max", "count", "sum"],
    # TODO: Replace the above line with the one below once
    # https://github.com/pandas-dev/pandas/issues/40685 is resolved.
    # "func", ["min", "max", "idxmin", "idxmax", "count", "sum"],
)
@pytest.mark.xfail(
    condition=PANDAS_LT_140,
    reason="https://github.com/pandas-dev/pandas/issues/43209",
)
def test_groupby_agg_decimal(num_groups, nelem_per_group, func):
    # The number of digits after the decimal to use.
    decimal_digits = 2
    # The number of digits before the decimal to use.
    whole_digits = 2

    scale = 10**whole_digits
    nelem = num_groups * nelem_per_group

    # The unique is necessary because otherwise if there are duplicates idxmin
    # and idxmax may return different results than pandas (see
    # https://github.com/rapidsai/cudf/issues/7756). This is not relevant to
    # the current version of the test, because idxmin and idxmax simply don't
    # work with pandas Series composed of Decimal objects (see
    # https://github.com/pandas-dev/pandas/issues/40685). However, if that is
    # ever enabled, then this issue will crop up again so we may as well have
    # it fixed now.
    x = np.unique((np.random.rand(nelem) * scale).round(decimal_digits))
    y = np.unique((np.random.rand(nelem) * scale).round(decimal_digits))

    if x.size < y.size:
        total_elements = x.size
        y = y[: x.size]
    else:
        total_elements = y.size
        x = x[: y.size]

    # Note that this filtering can lead to one group with fewer elements, but
    # that shouldn't be a problem and is probably useful to test.
    idx_col = np.tile(np.arange(num_groups), nelem_per_group)[:total_elements]

    decimal_x = pd.Series([Decimal(str(d)) for d in x])
    decimal_y = pd.Series([Decimal(str(d)) for d in y])

    pdf = pd.DataFrame({"idx": idx_col, "x": decimal_x, "y": decimal_y})
    gdf = DataFrame(
        {
            "idx": idx_col,
            "x": cudf.Series(decimal_x),
            "y": cudf.Series(decimal_y),
        }
    )

    expect_df = pdf.groupby("idx", sort=True).agg(func)
    if rmm._cuda.gpu.runtimeGetVersion() < 11000:
        with pytest.raises(RuntimeError):
            got_df = gdf.groupby("idx", sort=True).agg(func)
    else:
        got_df = gdf.groupby("idx", sort=True).agg(func)
        assert_eq(expect_df["x"], got_df["x"], check_dtype=False)
        assert_eq(expect_df["y"], got_df["y"], check_dtype=False)


@pytest.mark.parametrize(
    "agg", ["min", "max", "idxmin", "idxmax", "count", "sum", "prod", "mean"]
)
def test_series_groupby(agg):
    s = pd.Series([1, 2, 3])
    g = Series([1, 2, 3])
    sg = s.groupby(s // 2)
    gg = g.groupby(g // 2)
    sa = getattr(sg, agg)()
    ga = getattr(gg, agg)()
    check_dtype = agg not in _index_type_aggs
    assert_groupby_results_equal(sa, ga, check_dtype=check_dtype)


@pytest.mark.parametrize(
    "agg", ["min", "max", "idxmin", "idxmax", "count", "sum", "prod", "mean"]
)
def test_series_groupby_agg(agg):
    s = pd.Series([1, 2, 3])
    g = Series([1, 2, 3])
    sg = s.groupby(s // 2).agg(agg)
    gg = g.groupby(g // 2).agg(agg)
    check_dtype = agg not in _index_type_aggs
    assert_groupby_results_equal(sg, gg, check_dtype=check_dtype)


@pytest.mark.parametrize(
    "agg",
    [
        "min",
        "max",
        "count",
        "sum",
        "prod",
        "mean",
        pytest.param(
            "idxmin",
            marks=pytest.mark.xfail(reason="gather needed for idxmin"),
        ),
        pytest.param(
            "idxmax",
            marks=pytest.mark.xfail(reason="gather needed for idxmax"),
        ),
    ],
)
def test_groupby_level_zero(agg):
    pdf = pd.DataFrame({"x": [1, 2, 3]}, index=[2, 5, 5])
    gdf = DataFrame.from_pandas(pdf)
    pdg = pdf.groupby(level=0)
    gdg = gdf.groupby(level=0)
    pdresult = getattr(pdg, agg)()
    gdresult = getattr(gdg, agg)()
    check_dtype = agg not in _index_type_aggs
    assert_groupby_results_equal(pdresult, gdresult, check_dtype=check_dtype)


@pytest.mark.parametrize(
    "agg",
    [
        "min",
        "max",
        "count",
        "sum",
        "prod",
        "mean",
        pytest.param(
            "idxmin",
            marks=pytest.mark.xfail(reason="gather needed for idxmin"),
        ),
        pytest.param(
            "idxmax",
            marks=pytest.mark.xfail(reason="gather needed for idxmax"),
        ),
    ],
)
def test_groupby_series_level_zero(agg):
    pdf = pd.Series([1, 2, 3], index=[2, 5, 5])
    gdf = Series.from_pandas(pdf)
    pdg = pdf.groupby(level=0)
    gdg = gdf.groupby(level=0)
    pdresult = getattr(pdg, agg)()
    gdresult = getattr(gdg, agg)()
    check_dtype = agg not in _index_type_aggs
    assert_groupby_results_equal(pdresult, gdresult, check_dtype=check_dtype)


def test_groupby_column_name():
    pdf = pd.DataFrame({"xx": [1.0, 2.0, 3.0], "yy": [1, 2, 3]})
    gdf = DataFrame.from_pandas(pdf)
    g = gdf.groupby("yy")
    p = pdf.groupby("yy")
    gxx = g["xx"].sum()
    pxx = p["xx"].sum()
    assert_groupby_results_equal(pxx, gxx)

    gxx = g["xx"].count()
    pxx = p["xx"].count()
    assert_groupby_results_equal(pxx, gxx, check_dtype=False)

    gxx = g["xx"].min()
    pxx = p["xx"].min()
    assert_groupby_results_equal(pxx, gxx)

    gxx = g["xx"].max()
    pxx = p["xx"].max()
    assert_groupby_results_equal(pxx, gxx)

    gxx = g["xx"].idxmin()
    pxx = p["xx"].idxmin()
    assert_groupby_results_equal(pxx, gxx, check_dtype=False)

    gxx = g["xx"].idxmax()
    pxx = p["xx"].idxmax()
    assert_groupby_results_equal(pxx, gxx, check_dtype=False)

    gxx = g["xx"].mean()
    pxx = p["xx"].mean()
    assert_groupby_results_equal(pxx, gxx)
def test_groupby_column_numeral():
    pdf = pd.DataFrame({0: [1.0, 2.0, 3.0], 1: [1, 2, 3]})
    gdf = DataFrame.from_pandas(pdf)
    p = pdf.groupby(1)
    g = gdf.groupby(1)
    pxx = p[0].sum()
    gxx = g[0].sum()
    assert_groupby_results_equal(pxx, gxx)

    pdf = pd.DataFrame({0.5: [1.0, 2.0, 3.0], 1.5: [1, 2, 3]})
    gdf = DataFrame.from_pandas(pdf)
    p = pdf.groupby(1.5)
    g = gdf.groupby(1.5)
    pxx = p[0.5].sum()
    gxx = g[0.5].sum()
    assert_groupby_results_equal(pxx, gxx)


@pytest.mark.parametrize(
    "series",
    [
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 1],
        [1, 2, 3],
        [4, 3, 2],
        [0, 2, 0],
        pd.Series([0, 2, 0]),
        pd.Series([0, 2, 0], index=[0, 2, 1]),
    ],
)  # noqa: E501
def test_groupby_external_series(series):
    pdf = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [1, 2, 1]})
    gdf = DataFrame.from_pandas(pdf)
    pxx = pdf.groupby(pd.Series(series)).x.sum()
    gxx = gdf.groupby(cudf.Series(series)).x.sum()
    assert_groupby_results_equal(pxx, gxx)


@pytest.mark.parametrize("series", [[0.0, 1.0], [1.0, 1.0, 1.0, 1.0]])
def test_groupby_external_series_incorrect_length(series):
    pdf = pd.DataFrame({"x": [1.0, 2.0, 3.0], "y": [1, 2, 1]})
    gdf = DataFrame.from_pandas(pdf)
    pxx = pdf.groupby(pd.Series(series)).x.sum()
    gxx = gdf.groupby(cudf.Series(series)).x.sum()
    assert_groupby_results_equal(pxx, gxx)


@pytest.mark.parametrize(
    "level", [0, 1, "a", "b", [0, 1], ["a", "b"], ["a", 1], -1, [-1, -2]]
)
def test_groupby_levels(level):
    idx = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (2, 2)], names=("a", "b"))
    pdf = pd.DataFrame({"c": [1, 2, 3], "d": [2, 3, 4]}, index=idx)
    gdf = cudf.from_pandas(pdf)
    assert_groupby_results_equal(
        pdf.groupby(level=level).sum(),
        gdf.groupby(level=level).sum(),
    )


def test_advanced_groupby_levels():
    pdf = pd.DataFrame({"x": [1, 2, 3], "y": [1, 2, 1], "z": [1, 1, 1]})
    gdf = cudf.from_pandas(pdf)
    pdg = pdf.groupby(["x", "y"]).sum()
    gdg = gdf.groupby(["x", "y"]).sum()
    assert_groupby_results_equal(pdg, gdg)
    pdh = pdg.groupby(level=1).sum()
    gdh = gdg.groupby(level=1).sum()
    assert_groupby_results_equal(pdh, gdh)
    pdg = pdf.groupby(["x", "y", "z"]).sum()
    gdg = gdf.groupby(["x", "y", "z"]).sum()
    assert_groupby_results_equal(pdg, gdg)
    pdg = pdf.groupby(["z"]).sum()
    gdg = gdf.groupby(["z"]).sum()
    assert_groupby_results_equal(pdg, gdg)
    pdg = pdf.groupby(["y", "z"]).sum()
    gdg = gdf.groupby(["y", "z"]).sum()
    assert_groupby_results_equal(pdg, gdg)
    pdg = pdf.groupby(["x", "z"]).sum()
    gdg = gdf.groupby(["x", "z"]).sum()
    assert_groupby_results_equal(pdg, gdg)
    pdg = pdf.groupby(["y"]).sum()
    gdg = gdf.groupby(["y"]).sum()
    assert_groupby_results_equal(pdg, gdg)
    pdg = pdf.groupby(["x"]).sum()
    gdg = gdf.groupby(["x"]).sum()
    assert_groupby_results_equal(pdg, gdg)
    pdh = pdg.groupby(level=0).sum()
    gdh = gdg.groupby(level=0).sum()
    assert_groupby_results_equal(pdh, gdh)
    pdg = pdf.groupby(["x", "y"]).sum()
    gdg = gdf.groupby(["x", "y"]).sum()
    pdh = pdg.groupby(level=[0, 1]).sum()
    gdh = gdg.groupby(level=[0, 1]).sum()
    assert_groupby_results_equal(pdh, gdh)
    pdh = pdg.groupby(level=[1, 0]).sum()
    gdh = gdg.groupby(level=[1, 0]).sum()
    assert_groupby_results_equal(pdh, gdh)
    pdg = pdf.groupby(["x", "y"]).sum()
    gdg = gdf.groupby(["x", "y"]).sum()

    assert_exceptions_equal(
        lfunc=pdg.groupby,
        rfunc=gdg.groupby,
        lfunc_args_and_kwargs=([], {"level": 2}),
        rfunc_args_and_kwargs=([], {"level": 2}),
    )
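
# Illustrative sketch of level-based grouping as exercised above: with a
# MultiIndex, `level` can select levels by position, by name, with negative
# indices, or as a list, mirroring the parametrized cases.
def _demo_groupby_level():
    pidx = pd.MultiIndex.from_tuples(
        [(1, 1), (1, 2), (2, 2)], names=("a", "b")
    )
    gdf = cudf.from_pandas(pd.DataFrame({"c": [1, 2, 3]}, index=pidx))
    return gdf.groupby(level="a").sum()
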
df: df.groupby(["x"]).sum(), lambda df: df.groupby(["x"]).agg("sum"), lambda df: df.groupby(["x", "y"]).z.sum(), lambda df: df.groupby(["x", "y"]).z.agg("sum"), ], ) def test_empty_groupby(func): pdf = pd.DataFrame({"x": [], "y": [], "z": []}) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal(func(pdf), func(gdf), check_index_type=False) def test_groupby_unsupported_columns(): np.random.seed(12) pd_cat = pd.Categorical( pd.Series(np.random.choice(["a", "b", 1], 3), dtype="category") ) pdf = pd.DataFrame( { "x": [1, 2, 3], "y": ["a", "b", "c"], "z": ["d", "e", "f"], "a": [3, 4, 5], } ) pdf["b"] = pd_cat gdf = cudf.from_pandas(pdf) with pytest.warns(FutureWarning): pdg = pdf.groupby("x").sum() # cudf does not yet support numeric_only, so our default is False (unlike # pandas, which defaults to inferring and throws a warning about it). gdg = gdf.groupby("x").sum() assert_groupby_results_equal(pdg, gdg) def test_list_of_series(): pdf = pd.DataFrame({"x": [1, 2, 3], "y": [1, 2, 1]}) gdf = cudf.from_pandas(pdf) pdg = pdf.groupby([pdf.x]).y.sum() gdg = gdf.groupby([gdf.x]).y.sum() assert_groupby_results_equal(pdg, gdg) pdg = pdf.groupby([pdf.x, pdf.y]).y.sum() gdg = gdf.groupby([gdf.x, gdf.y]).y.sum() pytest.skip() assert_groupby_results_equal(pdg, gdg) def test_groupby_use_agg_column_as_index(): pdf = pd.DataFrame() pdf["a"] = [1, 1, 1, 3, 5] gdf = cudf.DataFrame() gdf["a"] = [1, 1, 1, 3, 5] pdg = pdf.groupby("a").agg({"a": "count"}) gdg = gdf.groupby("a").agg({"a": "count"}) assert_groupby_results_equal(pdg, gdg, check_dtype=False) def test_groupby_list_then_string(): gdf = cudf.DataFrame() gdf["a"] = [0, 1, 0, 1, 2] gdf["b"] = [11, 2, 15, 12, 2] gdf["c"] = [6, 7, 6, 7, 6] pdf = gdf.to_pandas() gdg = gdf.groupby("a", as_index=True).agg( {"b": ["min", "max"], "c": "max"} ) pdg = pdf.groupby("a", as_index=True).agg( {"b": ["min", "max"], "c": "max"} ) assert_groupby_results_equal(gdg, pdg) def test_groupby_different_unequal_length_column_aggregations(): gdf = cudf.DataFrame() gdf["a"] = [0, 1, 0, 1, 2] gdf["b"] = [11, 2, 15, 12, 2] gdf["c"] = [11, 2, 15, 12, 2] pdf = gdf.to_pandas() gdg = gdf.groupby("a", as_index=True).agg( {"b": "min", "c": ["max", "min"]} ) pdg = pdf.groupby("a", as_index=True).agg( {"b": "min", "c": ["max", "min"]} ) assert_groupby_results_equal(pdg, gdg) def test_groupby_single_var_two_aggs(): gdf = cudf.DataFrame() gdf["a"] = [0, 1, 0, 1, 2] gdf["b"] = [11, 2, 15, 12, 2] gdf["c"] = [11, 2, 15, 12, 2] pdf = gdf.to_pandas() gdg = gdf.groupby("a", as_index=True).agg({"b": ["min", "max"]}) pdg = pdf.groupby("a", as_index=True).agg({"b": ["min", "max"]}) assert_groupby_results_equal(pdg, gdg) def test_groupby_double_var_two_aggs(): gdf = cudf.DataFrame() gdf["a"] = [0, 1, 0, 1, 2] gdf["b"] = [11, 2, 15, 12, 2] gdf["c"] = [11, 2, 15, 12, 2] pdf = gdf.to_pandas() gdg = gdf.groupby(["a", "b"], as_index=True).agg({"c": ["min", "max"]}) pdg = pdf.groupby(["a", "b"], as_index=True).agg({"c": ["min", "max"]}) assert_groupby_results_equal(pdg, gdg) def test_groupby_apply_basic_agg_single_column(): gdf = DataFrame() gdf["key"] = [0, 0, 1, 1, 2, 2, 0] gdf["val"] = [0, 1, 2, 3, 4, 5, 6] gdf["mult"] = gdf["key"] * gdf["val"] pdf = gdf.to_pandas() gdg = gdf.groupby(["key", "val"]).mult.sum() pdg = pdf.groupby(["key", "val"]).mult.sum() assert_groupby_results_equal(pdg, gdg) def test_groupby_multi_agg_single_groupby_series(): pdf = pd.DataFrame( { "x": np.random.randint(0, 5, size=10000), "y": np.random.normal(size=10000), } ) gdf = cudf.from_pandas(pdf) pdg = 
pdf.groupby("x").y.agg(["sum", "max"]) gdg = gdf.groupby("x").y.agg(["sum", "max"]) assert_groupby_results_equal(pdg, gdg) def test_groupby_multi_agg_multi_groupby(): pdf = pd.DataFrame( { "a": np.random.randint(0, 5, 10), "b": np.random.randint(0, 5, 10), "c": np.random.randint(0, 5, 10), "d": np.random.randint(0, 5, 10), } ) gdf = cudf.from_pandas(pdf) pdg = pdf.groupby(["a", "b"]).agg(["sum", "max"]) gdg = gdf.groupby(["a", "b"]).agg(["sum", "max"]) assert_groupby_results_equal(pdg, gdg) def test_groupby_datetime_multi_agg_multi_groupby(): pdf = pd.DataFrame( { "a": pd.date_range( datetime.datetime.now(), datetime.datetime.now() + datetime.timedelta(9), freq="D", ), "b": np.random.randint(0, 5, 10), "c": np.random.randint(0, 5, 10), "d": np.random.randint(0, 5, 10), } ) gdf = cudf.from_pandas(pdf) pdg = pdf.groupby(["a", "b"]).agg(["sum", "max"]) gdg = gdf.groupby(["a", "b"]).agg(["sum", "max"]) assert_groupby_results_equal(pdg, gdg) @pytest.mark.parametrize( "agg", [ ["min", "max", "count", "mean"], ["mean", "var", "std"], ["count", "mean", "var", "std"], ], ) def test_groupby_multi_agg_hash_groupby(agg): alphabets = "abcdefghijklmnopqrstuvwxyz" prefixes = alphabets[:10] coll_dict = dict() for prefix in prefixes: for this_name in alphabets: coll_dict[prefix + this_name] = float coll_dict["id"] = int gdf = cudf.datasets.timeseries( start="2000", end="2000-01-2", dtypes=coll_dict, freq="1s", seed=1, ).reset_index(drop=True) pdf = gdf.to_pandas() check_dtype = "count" not in agg pdg = pdf.groupby("id").agg(agg) gdg = gdf.groupby("id").agg(agg) assert_groupby_results_equal(pdg, gdg, check_dtype=check_dtype) @pytest.mark.parametrize( "agg", ["min", "max", "idxmax", "idxmax", "sum", "prod", "count", "mean"] ) def test_groupby_nulls_basic(agg): check_dtype = agg not in _index_type_aggs pdf = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": [1, 2, 1, 2, 1, None]}) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( getattr(pdf.groupby("a"), agg)(), getattr(gdf.groupby("a"), agg)(), check_dtype=check_dtype, ) pdf = pd.DataFrame( { "a": [0, 0, 1, 1, 2, 2], "b": [1, 2, 1, 2, 1, None], "c": [1, 2, 1, None, 1, 2], } ) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( getattr(pdf.groupby("a"), agg)(), getattr(gdf.groupby("a"), agg)(), check_dtype=check_dtype, ) pdf = pd.DataFrame( { "a": [0, 0, 1, 1, 2, 2], "b": [1, 2, 1, 2, 1, None], "c": [1, 2, None, None, 1, 2], } ) gdf = cudf.from_pandas(pdf) # TODO: fillna() used here since we don't follow # Pandas' null semantics. Should we change it? 
@pytest.mark.parametrize(
    "agg", ["min", "max", "idxmin", "idxmax", "sum", "prod", "count", "mean"]
)
def test_groupby_nulls_basic(agg):
    check_dtype = agg not in _index_type_aggs

    pdf = pd.DataFrame({"a": [0, 0, 1, 1, 2, 2], "b": [1, 2, 1, 2, 1, None]})
    gdf = cudf.from_pandas(pdf)
    assert_groupby_results_equal(
        getattr(pdf.groupby("a"), agg)(),
        getattr(gdf.groupby("a"), agg)(),
        check_dtype=check_dtype,
    )

    pdf = pd.DataFrame(
        {
            "a": [0, 0, 1, 1, 2, 2],
            "b": [1, 2, 1, 2, 1, None],
            "c": [1, 2, 1, None, 1, 2],
        }
    )
    gdf = cudf.from_pandas(pdf)
    assert_groupby_results_equal(
        getattr(pdf.groupby("a"), agg)(),
        getattr(gdf.groupby("a"), agg)(),
        check_dtype=check_dtype,
    )

    pdf = pd.DataFrame(
        {
            "a": [0, 0, 1, 1, 2, 2],
            "b": [1, 2, 1, 2, 1, None],
            "c": [1, 2, None, None, 1, 2],
        }
    )
    gdf = cudf.from_pandas(pdf)

    # TODO: fillna() used here since we don't follow
    # Pandas' null semantics. Should we change it?
    assert_groupby_results_equal(
        getattr(pdf.groupby("a"), agg)().fillna(0),
        getattr(gdf.groupby("a"), agg)().fillna(0 if agg != "prod" else 1),
        check_dtype=check_dtype,
    )


def test_groupby_nulls_in_index():
    pdf = pd.DataFrame({"a": [None, 2, 1, 1], "b": [1, 2, 3, 4]})
    gdf = cudf.from_pandas(pdf)

    assert_groupby_results_equal(
        pdf.groupby("a").sum(), gdf.groupby("a").sum()
    )


def test_groupby_all_nulls_index():
    gdf = cudf.DataFrame(
        {
            "a": cudf.Series([None, None, None, None], dtype="object"),
            "b": [1, 2, 3, 4],
        }
    )
    pdf = gdf.to_pandas()
    assert_groupby_results_equal(
        pdf.groupby("a").sum(), gdf.groupby("a").sum()
    )

    gdf = cudf.DataFrame(
        {"a": cudf.Series([np.nan, np.nan, np.nan, np.nan]), "b": [1, 2, 3, 4]}
    )
    pdf = gdf.to_pandas()
    assert_groupby_results_equal(
        pdf.groupby("a").sum(), gdf.groupby("a").sum()
    )


@pytest.mark.parametrize("sort", [True, False])
def test_groupby_sort(sort):
    pdf = pd.DataFrame({"a": [2, 2, 1, 1], "b": [1, 2, 3, 4]})
    gdf = cudf.from_pandas(pdf)

    assert_eq(
        pdf.groupby("a", sort=sort).sum(),
        gdf.groupby("a", sort=sort).sum(),
        check_like=not sort,
    )

    pdf = pd.DataFrame(
        {"c": [-1, 2, 1, 4], "b": [1, 2, 3, 4], "a": [2, 2, 1, 1]}
    )
    gdf = cudf.from_pandas(pdf)

    assert_eq(
        pdf.groupby(["c", "b"], sort=sort).sum(),
        gdf.groupby(["c", "b"], sort=sort).sum(),
        check_like=not sort,
    )

    ps = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=[2, 2, 2, 3, 3, 1, 1, 1])
    gs = cudf.from_pandas(ps)

    assert_eq(
        ps.groupby(level=0, sort=sort).sum().to_frame(),
        gs.groupby(level=0, sort=sort).sum().to_frame(),
        check_like=not sort,
    )

    ps = pd.Series(
        [1, 2, 3, 4, 5, 6, 7, 8],
        index=pd.MultiIndex.from_product([(1, 2), ("a", "b"), (42, 84)]),
    )
    gs = cudf.from_pandas(ps)

    assert_eq(
        ps.groupby(level=0, sort=sort).sum().to_frame(),
        gs.groupby(level=0, sort=sort).sum().to_frame(),
        check_like=not sort,
    )


def test_groupby_cat():
    pdf = pd.DataFrame(
        {"a": [1, 1, 2], "b": pd.Series(["b", "b", "a"], dtype="category")}
    )
    gdf = cudf.from_pandas(pdf)
    assert_groupby_results_equal(
        pdf.groupby("a").count(),
        gdf.groupby("a").count(),
        check_dtype=False,
    )


def test_groupby_index_type():
    df = cudf.DataFrame()
    df["string_col"] = ["a", "b", "c"]
    df["counts"] = [1, 2, 3]
    res = df.groupby(by="string_col").counts.sum()
    assert isinstance(res.index, cudf.StringIndex)
pdf = pd.DataFrame(raw_data).fillna(0) gdf = DataFrame.from_pandas(pdf) pdg = pdf.groupby("x") gdg = gdf.groupby("x") pdresult = pdg.quantile(q, interpolation=interpolation) gdresult = gdg.quantile(q, interpolation=interpolation) assert_groupby_results_equal(pdresult, gdresult) def test_groupby_std(): raw_data = { "x": [1, 2, 3, 1, 2, 2, 1, None, 3, 2], "y": [None, 1, 2, 3, 4, None, 6, 7, 8, 9], } pdf = pd.DataFrame(raw_data) gdf = DataFrame.from_pandas(pdf) pdg = pdf.groupby("x") gdg = gdf.groupby("x") pdresult = pdg.std() gdresult = gdg.std() assert_groupby_results_equal(pdresult, gdresult) def test_groupby_size(): pdf = pd.DataFrame( { "a": [1, 1, 3, 4], "b": ["bob", "bob", "alice", "cooper"], "c": [1, 2, 3, 4], } ) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a").size(), gdf.groupby("a").size(), check_dtype=False, ) assert_groupby_results_equal( pdf.groupby(["a", "b", "c"]).size(), gdf.groupby(["a", "b", "c"]).size(), check_dtype=False, ) sr = pd.Series(range(len(pdf))) assert_groupby_results_equal( pdf.groupby(sr).size(), gdf.groupby(sr).size(), check_dtype=False, ) @pytest.mark.parametrize("index", [None, [1, 2, 3, 4]]) def test_groupby_cumcount(index): pdf = pd.DataFrame( { "a": [1, 1, 3, 4], "b": ["bob", "bob", "alice", "cooper"], "c": [1, 2, 3, 4], }, index=index, ) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a").cumcount(), gdf.groupby("a").cumcount(), check_dtype=False, ) assert_groupby_results_equal( pdf.groupby(["a", "b", "c"]).cumcount(), gdf.groupby(["a", "b", "c"]).cumcount(), check_dtype=False, ) sr = pd.Series(range(len(pdf)), index=index) assert_groupby_results_equal( pdf.groupby(sr).cumcount(), gdf.groupby(sr).cumcount(), check_dtype=False, ) @pytest.mark.parametrize("nelem", [2, 3, 1000]) @pytest.mark.parametrize("as_index", [True, False]) @pytest.mark.parametrize( "agg", ["min", "max", "idxmin", "idxmax", "mean", "count"] ) def test_groupby_datetime(nelem, as_index, agg): if agg == "mean" and as_index is True: return check_dtype = agg not in ("mean", "count", "idxmin", "idxmax") pdf = make_frame(pd.DataFrame, nelem=nelem, with_datetime=True) gdf = make_frame(cudf.DataFrame, nelem=nelem, with_datetime=True) pdg = pdf.groupby("datetime", as_index=as_index) gdg = gdf.groupby("datetime", as_index=as_index) if as_index is False: pdres = getattr(pdg, agg)() gdres = getattr(gdg, agg)() else: pdres = pdg.agg({"datetime": agg}) gdres = gdg.agg({"datetime": agg}) assert_groupby_results_equal( pdres, gdres, check_dtype=check_dtype, as_index=as_index, by=["datetime"], ) def test_groupby_dropna(): df = cudf.DataFrame({"a": [1, 1, None], "b": [1, 2, 3]}) expect = cudf.DataFrame( {"b": [3, 3]}, index=cudf.Series([1, None], name="a") ) got = df.groupby("a", dropna=False).sum() assert_groupby_results_equal(expect, got) df = cudf.DataFrame( {"a": [1, 1, 1, None], "b": [1, None, 1, None], "c": [1, 2, 3, 4]} ) idx = cudf.MultiIndex.from_frame( df[["a", "b"]].drop_duplicates().sort_values(["a", "b"]), names=["a", "b"], ) expect = cudf.DataFrame({"c": [4, 2, 4]}, index=idx) got = df.groupby(["a", "b"], dropna=False).sum() assert_groupby_results_equal(expect, got) def test_groupby_dropna_getattr(): df = cudf.DataFrame() df["id"] = [0, 1, 1, None, None, 3, 3] df["val"] = [0, 1, 1, 2, 2, 3, 3] got = df.groupby("id", dropna=False).val.sum() expect = cudf.Series( [0, 2, 6, 4], name="val", index=cudf.Series([0, 1, 3, None], name="id") ) assert_groupby_results_equal(expect, got) def test_groupby_categorical_from_string(): gdf = cudf.DataFrame() 
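    # Build the key by casting a string column to categorical, then check
    # that grouping over it matches an explicit set_index construction.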
gdf["id"] = ["a", "b", "c"] gdf["val"] = [0, 1, 2] gdf["id"] = gdf["id"].astype("category") assert_groupby_results_equal( cudf.DataFrame({"val": gdf["val"]}).set_index(keys=gdf["id"]), gdf.groupby("id").sum(), ) def test_groupby_arbitrary_length_series(): gdf = cudf.DataFrame({"a": [1, 1, 2], "b": [2, 3, 4]}, index=[4, 5, 6]) gsr = cudf.Series([1.0, 2.0, 2.0], index=[3, 4, 5]) pdf = gdf.to_pandas() psr = gsr.to_pandas() expect = pdf.groupby(psr).sum() got = gdf.groupby(gsr).sum() assert_groupby_results_equal(expect, got) def test_groupby_series_same_name_as_dataframe_column(): gdf = cudf.DataFrame({"a": [1, 1, 2], "b": [2, 3, 4]}, index=[4, 5, 6]) gsr = cudf.Series([1.0, 2.0, 2.0], name="a", index=[3, 4, 5]) pdf = gdf.to_pandas() psr = gsr.to_pandas() expect = pdf.groupby(psr).sum() got = gdf.groupby(gsr).sum() assert_groupby_results_equal(expect, got) def test_group_by_series_and_column_name_in_by(): gdf = cudf.DataFrame( {"x": [1.0, 2.0, 3.0], "y": [1, 2, 1]}, index=[1, 2, 3] ) gsr0 = cudf.Series([0.0, 1.0, 2.0], name="a", index=[1, 2, 3]) gsr1 = cudf.Series([0.0, 1.0, 3.0], name="b", index=[3, 4, 5]) pdf = gdf.to_pandas() psr0 = gsr0.to_pandas() psr1 = gsr1.to_pandas() expect = pdf.groupby(["x", psr0, psr1]).sum() got = gdf.groupby(["x", gsr0, gsr1]).sum() assert_groupby_results_equal(expect, got) @pytest.mark.parametrize( "grouper", [ "a", ["a"], ["a", "b"], np.array([0, 1, 1, 2, 3, 2]), {0: "a", 1: "a", 2: "b", 3: "a", 4: "b", 5: "c"}, lambda x: x + 1, ["a", np.array([0, 1, 1, 2, 3, 2])], ], ) def test_grouping(grouper): pdf = pd.DataFrame( { "a": [1, 1, 1, 2, 2, 3], "b": [1, 2, 1, 2, 1, 2], "c": [1, 2, 3, 4, 5, 6], } ) gdf = cudf.from_pandas(pdf) # There's no easy way to validate that the same warning is thrown by both # cudf and pandas here because it's only thrown upon iteration, so we # settle for catching warnings on the whole block. 
with expect_warning_if(isinstance(grouper, list) and len(grouper) == 1): for pdf_group, gdf_group in zip( pdf.groupby(grouper), gdf.groupby(grouper) ): assert pdf_group[0] == gdf_group[0] assert_eq(pdf_group[1], gdf_group[1]) @pytest.mark.parametrize("agg", [lambda x: x.count(), "count"]) @pytest.mark.parametrize("by", ["a", ["a", "b"], ["a", "c"]]) def test_groupby_count(agg, by): pdf = pd.DataFrame( {"a": [1, 1, 1, 2, 3], "b": [1, 2, 2, 2, 1], "c": [1, 2, None, 4, 5]} ) gdf = cudf.from_pandas(pdf) expect = pdf.groupby(by).agg(agg) got = gdf.groupby(by).agg(agg) assert_groupby_results_equal(expect, got, check_dtype=True) @pytest.mark.parametrize("agg", [lambda x: x.median(), "median"]) @pytest.mark.parametrize("by", ["a", ["a", "b"], ["a", "c"]]) def test_groupby_median(agg, by): pdf = pd.DataFrame( {"a": [1, 1, 1, 2, 3], "b": [1, 2, 2, 2, 1], "c": [1, 2, None, 4, 5]} ) gdf = cudf.from_pandas(pdf) expect = pdf.groupby(by).agg(agg) got = gdf.groupby(by).agg(agg) assert_groupby_results_equal(expect, got, check_dtype=False) @pytest.mark.parametrize("agg", [lambda x: x.nunique(), "nunique"]) @pytest.mark.parametrize("by", ["a", ["a", "b"], ["a", "c"]]) def test_groupby_nunique(agg, by): pdf = pd.DataFrame( {"a": [1, 1, 1, 2, 3], "b": [1, 2, 2, 2, 1], "c": [1, 2, None, 4, 5]} ) gdf = cudf.from_pandas(pdf) expect = pdf.groupby(by).nunique() got = gdf.groupby(by).nunique() assert_groupby_results_equal(expect, got, check_dtype=False) @pytest.mark.parametrize( "n", [0, 1, 2, 10], ) @pytest.mark.parametrize("by", ["a", ["a", "b"], ["a", "c"]]) def test_groupby_nth(n, by): pdf = pd.DataFrame( { "a": [1, 1, 1, 2, 3], "b": [1, 2, 2, 2, 1], "c": [1, 2, None, 4, 5], "d": ["a", "b", "c", "d", "e"], } ) gdf = cudf.from_pandas(pdf) expect = pdf.groupby(by).nth(n) got = gdf.groupby(by).nth(n) assert_groupby_results_equal(expect, got, check_dtype=False) @pytest.mark.xfail( reason="https://github.com/pandas-dev/pandas/issues/43209", ) def test_raise_data_error(): pdf = pd.DataFrame({"a": [1, 2, 3, 4], "b": ["a", "b", "c", "d"]}) gdf = cudf.from_pandas(pdf) assert_exceptions_equal( pdf.groupby("a").mean, gdf.groupby("a").mean, ) def test_drop_unsupported_multi_agg(): gdf = cudf.DataFrame( {"a": [1, 1, 2, 2], "b": [1, 2, 3, 4], "c": ["a", "b", "c", "d"]} ) assert_groupby_results_equal( gdf.groupby("a").agg(["count", "mean"]), gdf.groupby("a").agg({"b": ["count", "mean"], "c": ["count"]}), ) @pytest.mark.parametrize( "agg", ( list(itertools.combinations(["count", "max", "min", "nunique"], 2)) + [ {"b": "min", "c": "mean"}, {"b": "max", "c": "mean"}, {"b": "count", "c": "mean"}, {"b": "nunique", "c": "mean"}, ] ), ) def test_groupby_agg_combinations(agg): pdf = pd.DataFrame( { "a": [1, 1, 2, 2, 3], "b": ["a", "a", "b", "c", "d"], "c": [1, 2, 3, 4, 5], } ) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a").agg(agg), gdf.groupby("a").agg(agg), check_dtype=False, ) def test_groupby_apply_noempty_group(): pdf = pd.DataFrame( {"a": [1, 1, 2, 2], "b": [1, 2, 1, 2], "c": [1, 2, 3, 4]} ) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a", group_keys=False) .apply(lambda x: x.iloc[[0, 1]]) .reset_index(drop=True), gdf.groupby("a") .apply(lambda x: x.iloc[[0, 1]]) .reset_index(drop=True), ) def test_reset_index_after_empty_groupby(): # GH #5475 pdf = pd.DataFrame({"a": [1, 2, 3]}) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a").sum().reset_index(), gdf.groupby("a").sum().reset_index(), as_index=False, by="a", ) def test_groupby_attribute_error(): 
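    # Override the internal ``_groupby`` handle so that touching it raises;
    # the AttributeError should propagate unchanged rather than being
    # swallowed by attribute forwarding.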
err_msg = "Test error message" class TestGroupBy(cudf.core.groupby.GroupBy): @property def _groupby(self): raise AttributeError(err_msg) a = cudf.DataFrame({"a": [1, 2], "b": [2, 3]}) gb = TestGroupBy(a, a["a"]) with pytest.raises(AttributeError, match=err_msg): gb.sum() @pytest.mark.parametrize( "by", [ "a", "b", ["a"], ["b"], ["a", "b"], ["b", "a"], np.array([0, 0, 0, 1, 1, 1, 2]), ], ) def test_groupby_groups(by): pdf = pd.DataFrame( {"a": [1, 2, 1, 2, 1, 2, 3], "b": [1, 2, 3, 4, 5, 6, 7]} ) gdf = cudf.from_pandas(pdf) pdg = pdf.groupby(by) gdg = gdf.groupby(by) for key in pdg.groups: assert key in gdg.groups assert_eq(pdg.groups[key], gdg.groups[key]) @pytest.mark.parametrize( "by", [ "a", "b", ["a"], ["b"], ["a", "b"], ["b", "a"], ["a", "c"], ["a", "b", "c"], ], ) def test_groupby_groups_multi(by): pdf = pd.DataFrame( { "a": [1, 2, 1, 2, 1, 2, 3], "b": ["a", "b", "a", "b", "b", "c", "c"], "c": [1, 2, 3, 4, 5, 6, 7], } ) gdf = cudf.from_pandas(pdf) pdg = pdf.groupby(by) gdg = gdf.groupby(by) for key in pdg.groups: assert key in gdg.groups assert_eq(pdg.groups[key], gdg.groups[key]) def test_groupby_nunique_series(): pdf = pd.DataFrame({"a": [1, 1, 1, 2, 2, 2], "b": [1, 2, 3, 1, 1, 2]}) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a")["b"].nunique(), gdf.groupby("a")["b"].nunique(), check_dtype=False, ) @pytest.mark.parametrize("list_agg", [list, "collect"]) def test_groupby_list_simple(list_agg): pdf = pd.DataFrame({"a": [1, 1, 1, 2, 2, 2], "b": [1, 2, None, 4, 5, 6]}) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a").agg({"b": list}), gdf.groupby("a").agg({"b": list_agg}), check_dtype=False, ) @pytest.mark.parametrize("list_agg", [list, "collect"]) def test_groupby_list_of_lists(list_agg): pdf = pd.DataFrame( { "a": [1, 1, 1, 2, 2, 2], "b": [[1, 2], [3, None, 5], None, [], [7, 8], [9]], } ) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a").agg({"b": list}), gdf.groupby("a").agg({"b": list_agg}), check_dtype=False, ) @pytest.mark.parametrize("list_agg", [list, "collect"]) def test_groupby_list_of_structs(list_agg): pdf = pd.DataFrame( { "a": [1, 1, 1, 2, 2, 2], "b": [ {"c": "1", "d": 1}, {"c": "2", "d": 2}, {"c": "3", "d": 3}, {"c": "4", "d": 4}, {"c": "5", "d": 5}, {"c": "6", "d": 6}, ], } ) gdf = cudf.from_pandas(pdf) grouped = gdf.groupby("a").agg({"b": list_agg}) assert_groupby_results_equal( pdf.groupby("a").agg({"b": list}), grouped, check_dtype=True, ) assert grouped["b"].dtype.element_type == gdf["b"].dtype @pytest.mark.parametrize("list_agg", [list, "collect"]) def test_groupby_list_single_element(list_agg): pdf = pd.DataFrame({"a": [1, 2], "b": [3, None]}) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a").agg({"b": list}), gdf.groupby("a").agg({"b": list_agg}), check_dtype=False, ) @pytest.mark.parametrize( "agg", [list, [list, "count"], {"b": list, "c": "sum"}] ) def test_groupby_list_strings(agg): pdf = pd.DataFrame( { "a": [1, 1, 1, 2, 2], "b": ["b", "a", None, "e", "d"], "c": [1, 2, 3, 4, 5], } ) gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby("a").agg(agg), gdf.groupby("a").agg(agg), check_dtype=False, ) def test_groupby_list_columns_excluded(): pdf = pd.DataFrame( { "a": [1, 1, 2, 2], "b": [1, 2, 3, 4], "c": [[1, 2], [3, 4], [5, 6], [7, 8]], } ) gdf = cudf.from_pandas(pdf) # cudf does not yet support numeric_only, so our default is False, but # pandas defaults to inferring and throws a warning about it, so we need to # catch that. 
pandas future behavior will match ours by default (at which # point supporting numeric_only=True will be the open feature request). with pytest.warns(FutureWarning): pandas_result = pdf.groupby("a").mean() with pytest.warns(FutureWarning): pandas_agg_result = pdf.groupby("a").agg("mean") assert_groupby_results_equal( pandas_result, gdf.groupby("a").mean(), check_dtype=False ) assert_groupby_results_equal( pandas_agg_result, gdf.groupby("a").agg("mean"), check_dtype=False, ) def test_groupby_pipe(): pdf = pd.DataFrame({"A": "a b a b".split(), "B": [1, 2, 3, 4]}) gdf = cudf.from_pandas(pdf) expected = pdf.groupby("A").pipe(lambda x: x.max() - x.min()) actual = gdf.groupby("A").pipe(lambda x: x.max() - x.min()) assert_groupby_results_equal(expected, actual) def create_test_groupby_apply_return_scalars_params(): def f0(x): x = x[~x["B"].isna()] ticker = x.shape[0] full = ticker / 10 return full def f1(x, k): x = x[~x["B"].isna()] ticker = x.shape[0] full = ticker / k return full def f2(x, k, L): x = x[~x["B"].isna()] ticker = x.shape[0] full = L * (ticker / k) return full def f3(x, k, L, m): x = x[~x["B"].isna()] ticker = x.shape[0] full = L * (ticker / k) % m return full return [(f0, ()), (f1, (42,)), (f2, (42, 119)), (f3, (42, 119, 212.1))] @pytest.mark.parametrize( "func,args", create_test_groupby_apply_return_scalars_params() ) def test_groupby_apply_return_scalars(func, args): pdf = pd.DataFrame( { "A": [1, 1, 2, 2, 3, 3, 4, 4, 5, 5], "B": [ 0.01, np.nan, 0.03, 0.04, np.nan, 0.06, 0.07, 0.08, 0.09, 1.0, ], } ) gdf = cudf.from_pandas(pdf) expected = pdf.groupby("A").apply(func, *args) actual = gdf.groupby("A").apply(func, *args) assert_groupby_results_equal(expected, actual) def create_test_groupby_apply_return_series_dataframe_params(): def f0(x): return x - x.max() def f1(x): return x.min() - x.max() def f2(x): return x.min() def f3(x, k): return x - x.max() + k def f4(x, k, L): return x.min() - x.max() + (k / L) def f5(x, k, L, m): return m * x.min() + (k / L) return [ (f0, ()), (f1, ()), (f2, ()), (f3, (42,)), (f4, (42, 119)), (f5, (41, 119, 212.1)), ] @pytest.mark.parametrize( "func,args", create_test_groupby_apply_return_series_dataframe_params() ) def test_groupby_apply_return_series_dataframe(func, args): pdf = pd.DataFrame( {"key": [0, 0, 1, 1, 2, 2, 2], "val": [0, 1, 2, 3, 4, 5, 6]} ) gdf = cudf.from_pandas(pdf) expected = pdf.groupby(["key"], group_keys=False).apply(func, *args) actual = gdf.groupby(["key"]).apply(func, *args) assert_groupby_results_equal(expected, actual) @pytest.mark.parametrize( "pdf", [pd.DataFrame(), pd.DataFrame({"a": []}), pd.Series([], dtype="float64")], ) def test_groupby_no_keys(pdf): gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby([]).max(), gdf.groupby([]).max(), check_dtype=False, check_index_type=False, # Int64Index v/s Float64Index ) @pytest.mark.parametrize( "pdf", [pd.DataFrame(), pd.DataFrame({"a": []}), pd.Series([], dtype="float64")], ) def test_groupby_apply_no_keys(pdf): gdf = cudf.from_pandas(pdf) assert_groupby_results_equal( pdf.groupby([], group_keys=False).apply(lambda x: x.max()), gdf.groupby([]).apply(lambda x: x.max()), check_index_type=False, # Int64Index v/s Float64Index ) @pytest.mark.parametrize( "pdf", [pd.DataFrame({"a": [1, 2]}), pd.DataFrame({"a": [1, 2], "b": [2, 3]})], ) def test_groupby_nonempty_no_keys(pdf): gdf = cudf.from_pandas(pdf) assert_exceptions_equal( lambda: pdf.groupby([]), lambda: gdf.groupby([]), ) @pytest.mark.parametrize( "by,data", [ # ([], []), # error? 
([1, 1, 2, 2], [0, 0, 1, 1]), ([1, 2, 3, 4], [0, 0, 0, 0]), ([1, 2, 1, 2], [0, 1, 1, 1]), ], ) @pytest.mark.parametrize( "dtype", SIGNED_TYPES + DATETIME_TYPES + TIMEDELTA_TYPES + ["string", "category"], ) def test_groupby_unique(by, data, dtype): pdf = pd.DataFrame({"by": by, "data": data}) pdf["data"] = pdf["data"].astype(dtype) gdf = cudf.from_pandas(pdf) expect = pdf.groupby("by")["data"].unique() got = gdf.groupby("by")["data"].unique() assert_groupby_results_equal(expect, got) @pytest.mark.parametrize("nelem", [2, 3, 100, 1000]) @pytest.mark.parametrize("func", ["cummin", "cummax", "cumcount", "cumsum"]) def test_groupby_2keys_scan(nelem, func): pdf = make_frame(pd.DataFrame, nelem=nelem) expect_df = pdf.groupby(["x", "y"], sort=True).agg(func) got_df = ( make_frame(DataFrame, nelem=nelem) .groupby(["x", "y"], sort=True) .agg(func) ) # pd.groupby.cumcount returns a series. if isinstance(expect_df, pd.Series): expect_df = expect_df.to_frame("val") check_dtype = func not in _index_type_aggs assert_groupby_results_equal(got_df, expect_df, check_dtype=check_dtype) @pytest.mark.parametrize("nelem", [100, 1000]) @pytest.mark.parametrize("method", ["average", "min", "max", "first", "dense"]) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("na_option", ["keep", "top", "bottom"]) @pytest.mark.parametrize("pct", [False, True]) def test_groupby_2keys_rank(nelem, method, ascending, na_option, pct): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, ], rows=nelem, use_threads=False, ) pdf = t.to_pandas() pdf.columns = ["x", "y", "z"] gdf = cudf.from_pandas(pdf) expect_df = pdf.groupby(["x", "y"], sort=True).rank( method=method, ascending=ascending, na_option=na_option, pct=pct ) got_df = gdf.groupby(["x", "y"], sort=True).rank( method=method, ascending=ascending, na_option=na_option, pct=pct ) assert_groupby_results_equal(got_df, expect_df, check_dtype=False) def test_groupby_rank_fails(): gdf = cudf.DataFrame( {"x": [1, 2, 3, 4], "y": [1, 2, 3, 4], "z": [1, 2, 3, 4]} ) with pytest.raises(NotImplementedError): gdf.groupby(["x", "y"]).rank(method="min", axis=1) gdf = cudf.DataFrame( { "a": [1, 1, 1, 2, 2, 2], "b": [[1, 2], [3, None, 5], None, [], [7, 8], [9]], } ) with pytest.raises(NotImplementedError): gdf.groupby(["a"]).rank(method="min", axis=1) @pytest.mark.parametrize( "with_nan", [False, True], ids=["just-NA", "also-NaN"] ) @pytest.mark.parametrize("dropna", [False, True], ids=["keepna", "dropna"]) @pytest.mark.parametrize( "duplicate_index", [False, True], ids=["rangeindex", "dupindex"] ) def test_groupby_scan_null_keys(with_nan, dropna, duplicate_index): key_col = [None, 1, 2, None, 3, None, 3, 1, None, 1] if with_nan: df = pd.DataFrame( {"key": pd.Series(key_col, dtype="float32"), "value": range(10)} ) else: df = pd.DataFrame( {"key": pd.Series(key_col, dtype="Int32"), "value": range(10)} ) if duplicate_index: # Non-default index with duplicates df.index = [1, 2, 3, 1, 3, 2, 4, 1, 6, 10] cdf = cudf.from_pandas(df) expect = df.groupby("key", dropna=dropna).cumsum() got = cdf.groupby("key", dropna=dropna).cumsum() assert_eq(expect, got) def test_groupby_mix_agg_scan(): err_msg = "Cannot perform both aggregation and scan in one operation" func = ["cumsum", "sum"] gb = make_frame(DataFrame, nelem=10).groupby(["x", "y"], sort=True) gb.agg(func[0]) gb.agg(func[1]) gb.agg(func[1:]) with 
pytest.raises(NotImplementedError, match=err_msg): gb.agg(func) @pytest.mark.parametrize("nelem", [2, 3, 100, 1000]) @pytest.mark.parametrize("shift_perc", [0.5, 1.0, 1.5]) @pytest.mark.parametrize("direction", [1, -1]) @pytest.mark.parametrize("fill_value", [None, np.nan, 42]) def test_groupby_shift_row(nelem, shift_perc, direction, fill_value): pdf = make_frame(pd.DataFrame, nelem=nelem, extra_vals=["val2"]) gdf = cudf.from_pandas(pdf) n_shift = int(nelem * shift_perc) * direction expected = pdf.groupby(["x", "y"]).shift( periods=n_shift, fill_value=fill_value ) got = gdf.groupby(["x", "y"]).shift(periods=n_shift, fill_value=fill_value) assert_groupby_results_equal( expected[["val", "val2"]], got[["val", "val2"]] ) @pytest.mark.parametrize("nelem", [10, 50, 100, 1000]) @pytest.mark.parametrize("shift_perc", [0.5, 1.0, 1.5]) @pytest.mark.parametrize("direction", [1, -1]) @pytest.mark.parametrize( "fill_value", [ None, pytest.param( 0, marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/10608" ), ), pytest.param( 42, marks=pytest.mark.xfail( reason="https://github.com/rapidsai/cudf/issues/10608" ), ), ], ) def test_groupby_shift_row_mixed_numerics( nelem, shift_perc, direction, fill_value ): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "float32", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ns]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, ], rows=nelem, use_threads=False, ) pdf = t.to_pandas() gdf = cudf.from_pandas(pdf) n_shift = int(nelem * shift_perc) * direction expected = pdf.groupby(["0"]).shift(periods=n_shift, fill_value=fill_value) got = gdf.groupby(["0"]).shift(periods=n_shift, fill_value=fill_value) assert_groupby_results_equal( expected[["1", "2", "3", "4"]], got[["1", "2", "3", "4"]] ) # TODO: Shifting list columns is currently unsupported because we cannot # construct a null list scalar in python. Support once it is added. 
@pytest.mark.parametrize("nelem", [10, 50, 100, 1000]) @pytest.mark.parametrize("shift_perc", [0.5, 1.0, 1.5]) @pytest.mark.parametrize("direction", [1, -1]) def test_groupby_shift_row_mixed(nelem, shift_perc, direction): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "str", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ns]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, ], rows=nelem, use_threads=False, ) pdf = t.to_pandas() gdf = cudf.from_pandas(pdf) n_shift = int(nelem * shift_perc) * direction expected = pdf.groupby(["0"]).shift(periods=n_shift) got = gdf.groupby(["0"]).shift(periods=n_shift) assert_groupby_results_equal( expected[["1", "2", "3", "4"]], got[["1", "2", "3", "4"]] ) @pytest.mark.parametrize("nelem", [10, 50, 100, 1000]) @pytest.mark.parametrize("shift_perc", [0.5, 1.0, 1.5]) @pytest.mark.parametrize("direction", [1, -1]) @pytest.mark.parametrize( "fill_value", [ [ 42, "fill", np.datetime64(123, "ns"), cudf.Scalar(456, dtype="timedelta64[ns]"), ] ], ) def test_groupby_shift_row_mixed_fill( nelem, shift_perc, direction, fill_value ): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "str", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ns]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, ], rows=nelem, use_threads=False, ) pdf = t.to_pandas() gdf = cudf.from_pandas(pdf) n_shift = int(nelem * shift_perc) * direction # Pandas does not support specifying different fill_value by column, so we # simulate it column by column expected = pdf.copy() for col, single_fill in zip(pdf.iloc[:, 1:], fill_value): if isinstance(single_fill, cudf.Scalar): single_fill = single_fill._host_value expected[col] = ( pdf[col] .groupby(pdf["0"]) .shift(periods=n_shift, fill_value=single_fill) ) got = gdf.groupby(["0"]).shift(periods=n_shift, fill_value=fill_value) assert_groupby_results_equal( expected[["1", "2", "3", "4"]], got[["1", "2", "3", "4"]] ) @pytest.mark.parametrize("nelem", [10, 50, 100, 1000]) @pytest.mark.parametrize("fill_value", [None, 0, 42]) def test_groupby_shift_row_zero_shift(nelem, fill_value): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "float32", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ns]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, ], rows=nelem, use_threads=False, ) gdf = cudf.from_pandas(t.to_pandas()) expected = gdf got = gdf.groupby(["0"]).shift(periods=0, fill_value=fill_value) assert_groupby_results_equal( expected[["1", "2", "3", "4"]], got[["1", "2", "3", "4"]] ) @pytest.mark.parametrize("nelem", [2, 3, 100, 1000]) @pytest.mark.parametrize("shift_perc", [0.5, 1.0, 1.5]) @pytest.mark.parametrize("direction", [1, -1]) def test_groupby_diff_row(nelem, shift_perc, direction): pdf = make_frame(pd.DataFrame, nelem=nelem, extra_vals=["val2"]) gdf = cudf.from_pandas(pdf) n_shift = int(nelem * shift_perc) * direction expected = pdf.groupby(["x", "y"]).diff(periods=n_shift) got = gdf.groupby(["x", "y"]).diff(periods=n_shift) 
assert_groupby_results_equal( expected[["val", "val2"]], got[["val", "val2"]] ) @pytest.mark.parametrize("nelem", [10, 50, 100, 1000]) @pytest.mark.parametrize("shift_perc", [0.5, 1.0, 1.5]) @pytest.mark.parametrize("direction", [1, -1]) def test_groupby_diff_row_mixed_numerics(nelem, shift_perc, direction): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "float32", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "decimal64", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ns]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, ], rows=nelem, use_threads=False, ) pdf = t.to_pandas() gdf = cudf.from_pandas(pdf) n_shift = int(nelem * shift_perc) * direction expected = pdf.groupby(["0"]).diff(periods=n_shift) got = gdf.groupby(["0"]).diff(periods=n_shift) assert_groupby_results_equal( expected[["1", "2", "3", "4", "5"]], got[["1", "2", "3", "4", "5"]] ) @pytest.mark.parametrize("nelem", [10, 50, 100, 1000]) def test_groupby_diff_row_zero_shift(nelem): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "float32", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ns]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, ], rows=nelem, use_threads=False, ) gdf = cudf.from_pandas(t.to_pandas()) expected = gdf got = gdf.groupby(["0"]).shift(periods=0) assert_groupby_results_equal( expected[["1", "2", "3", "4"]], got[["1", "2", "3", "4"]] ) # TODO: test for category columns when cudf.Scalar supports category type @pytest.mark.parametrize("nelem", [10, 100, 1000]) def test_groupby_fillna_multi_value(nelem): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "float32", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ms]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, {"dtype": "decimal64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "str", "null_frequency": 0.4, "cardinality": 10}, ], rows=nelem, use_threads=False, seed=0, ) key_col = "0" value_cols = ["1", "2", "3", "4", "5", "6"] pdf = t.to_pandas() gdf = cudf.from_pandas(pdf) # fill the dataframe with the first non-null item in the column fill_values = { name: pdf[name].loc[pdf[name].first_valid_index()] for name in value_cols } # cudf can't fillna with a pandas.Timedelta type fill_values["4"] = fill_values["4"].to_numpy() expect = pdf.groupby(key_col).fillna(value=fill_values) got = gdf.groupby(key_col).fillna(value=fill_values) assert_groupby_results_equal(expect[value_cols], got[value_cols]) # TODO: test for category columns when cudf.Scalar supports category type # TODO: cudf.fillna does not support decimal column to column fill yet @pytest.mark.parametrize("nelem", [10, 100, 1000]) def test_groupby_fillna_multi_value_df(nelem): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "float32", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ms]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": 
"timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, {"dtype": "str", "null_frequency": 0.4, "cardinality": 10}, ], rows=nelem, use_threads=False, seed=0, ) key_col = "0" value_cols = ["1", "2", "3", "4", "5"] pdf = t.to_pandas() gdf = cudf.from_pandas(pdf) # fill the dataframe with the first non-null item in the column fill_values = { name: pdf[name].loc[pdf[name].first_valid_index()] for name in value_cols } # cudf can't fillna with a pandas.Timedelta type fill_values["4"] = fill_values["4"].to_numpy() fill_values = pd.DataFrame(fill_values, index=pdf.index) expect = pdf.groupby(key_col).fillna(value=fill_values) fill_values = cudf.from_pandas(fill_values) got = gdf.groupby(key_col).fillna(value=fill_values) assert_groupby_results_equal(expect[value_cols], got[value_cols]) @pytest.mark.parametrize( "by", [pd.Series([1, 1, 2, 2, 3, 4]), lambda x: x % 2 == 0, pd.Grouper(level=0)], ) @pytest.mark.parametrize( "data", [[1, None, 2, None, 3, None], [1, 2, 3, 4, 5, 6]] ) @pytest.mark.parametrize("args", [{"value": 42}, {"method": "ffill"}]) def test_groupby_various_by_fillna(by, data, args): ps = pd.Series(data) gs = cudf.from_pandas(ps) expect = ps.groupby(by).fillna(**args) if isinstance(by, pd.Grouper): by = cudf.Grouper(level=by.level) got = gs.groupby(by).fillna(**args) assert_groupby_results_equal(expect, got, check_dtype=False) @pytest.mark.parametrize("nelem", [10, 100, 1000]) @pytest.mark.parametrize("method", ["pad", "ffill", "backfill", "bfill"]) def test_groupby_fillna_method(nelem, method): t = rand_dataframe( dtypes_meta=[ {"dtype": "int64", "null_frequency": 0, "cardinality": 10}, {"dtype": "int64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "float32", "null_frequency": 0.4, "cardinality": 10}, { "dtype": "datetime64[ns]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "timedelta64[ns]", "null_frequency": 0.4, "cardinality": 10, }, { "dtype": "list", "null_frequency": 0.4, "cardinality": 10, "lists_max_length": 10, "nesting_max_depth": 3, "value_type": "int64", }, {"dtype": "category", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "decimal64", "null_frequency": 0.4, "cardinality": 10}, {"dtype": "str", "null_frequency": 0.4, "cardinality": 10}, ], rows=nelem, use_threads=False, seed=0, ) key_col = "0" value_cols = ["1", "2", "3", "4", "5", "6", "7", "8"] pdf = t.to_pandas() gdf = cudf.from_pandas(pdf) expect = pdf.groupby(key_col).fillna(method=method) with expect_warning_if(method in {"pad", "backfill"}): got = gdf.groupby(key_col).fillna(method=method) assert_groupby_results_equal( expect[value_cols], got[value_cols], sort=False ) @pytest.mark.parametrize( "data", [ {"Speed": [380.0, 370.0, 24.0, 26.0], "Score": [50, 30, 90, 80]}, { "Speed": [380.0, 370.0, 24.0, 26.0], "Score": [50, 30, 90, 80], "Other": [10, 20, 30, 40], }, ], ) @pytest.mark.parametrize("group", ["Score", "Speed"]) def test_groupby_describe(data, group): pdf = pd.DataFrame(data) gdf = cudf.from_pandas(pdf) got = gdf.groupby(group).describe() expect = pdf.groupby(group).describe() assert_groupby_results_equal(expect, got, check_dtype=False) @pytest.mark.parametrize( "data", [ {"a": [], "b": []}, {"a": [2, 1, 2, 1, 1, 3], "b": [None, 1, 2, None, 2, None]}, {"a": [None], "b": [None]}, {"a": [2, 1, 1], "b": [None, 1, 0], "c": [None, 0, 1]}, ], ) @pytest.mark.parametrize("agg", ["first", "last", ["first", "last"]]) def test_groupby_first(data, agg): pdf = pd.DataFrame(data) gdf = cudf.from_pandas(pdf) expect = pdf.groupby("a").agg(agg) got = gdf.groupby("a").agg(agg) 
assert_groupby_results_equal(expect, got, check_dtype=False) def test_groupby_apply_series(): def foo(x): return x.sum() got = make_frame(DataFrame, 100).groupby("x").y.apply(foo) expect = make_frame(pd.DataFrame, 100).groupby("x").y.apply(foo) assert_groupby_results_equal(expect, got) @pytest.mark.parametrize( "func,args", [ (lambda x, k: x + k, (42,)), (lambda x, k, L: x + k - L, (42, 191)), (lambda x, k, L, m: (x + k) / (L * m), (42, 191, 99.9)), ], ) def test_groupby_apply_series_args(func, args): got = make_frame(DataFrame, 100).groupby("x").y.apply(func, *args) expect = ( make_frame(pd.DataFrame, 100) .groupby("x", group_keys=False) .y.apply(func, *args) ) assert_groupby_results_equal(expect, got) @pytest.mark.parametrize("label", [None, "left", "right"]) @pytest.mark.parametrize("closed", [None, "left", "right"]) def test_groupby_freq_week(label, closed): pdf = pd.DataFrame( { "Publish date": [ pd.Timestamp("2000-01-03"), pd.Timestamp("2000-01-01"), pd.Timestamp("2000-01-09"), pd.Timestamp("2000-01-02"), pd.Timestamp("2000-01-07"), pd.Timestamp("2000-01-16"), ], "ID": [0, 1, 2, 3, 4, 5], "Price": [10, 20, 30, 40, 50, 60], } ) gdf = cudf.from_pandas(pdf) expect = pdf.groupby( pd.Grouper(key="Publish date", freq="1W", label=label, closed=closed) ).mean() got = gdf.groupby( cudf.Grouper(key="Publish date", freq="1W", label=label, closed=closed) ).mean() assert_eq(expect, got, check_like=True, check_dtype=False) @pytest.mark.parametrize("label", [None, "left", "right"]) @pytest.mark.parametrize("closed", [None, "left", "right"]) def test_groupby_freq_day(label, closed): pdf = pd.DataFrame( { "Publish date": [ pd.Timestamp("2000-01-03"), pd.Timestamp("2000-01-01"), pd.Timestamp("2000-01-09"), pd.Timestamp("2000-01-02"), pd.Timestamp("2000-01-07"), pd.Timestamp("2000-01-16"), ], "ID": [0, 1, 2, 3, 4, 5], "Price": [10, 20, 30, 40, 50, 60], } ) gdf = cudf.from_pandas(pdf) expect = pdf.groupby( pd.Grouper(key="Publish date", freq="3D", label=label, closed=closed) ).mean() got = gdf.groupby( cudf.Grouper(key="Publish date", freq="3D", label=label, closed=closed) ).mean() assert_eq(expect, got, check_like=True, check_dtype=False) @pytest.mark.parametrize("label", [None, "left", "right"]) @pytest.mark.parametrize("closed", [None, "left", "right"]) def test_groupby_freq_min(label, closed): pdf = pd.DataFrame( { "Publish date": [ pd.Timestamp("2000-01-01 12:01:00"), pd.Timestamp("2000-01-01 12:05:00"), pd.Timestamp("2000-01-01 15:30:00"), pd.Timestamp("2000-01-02 00:00:00"), pd.Timestamp("2000-01-01 23:47:00"), pd.Timestamp("2000-01-02 00:05:00"), ], "ID": [0, 1, 2, 3, 4, 5], "Price": [10, 20, 30, 40, 50, 60], } ) gdf = cudf.from_pandas(pdf) expect = pdf.groupby( pd.Grouper(key="Publish date", freq="1h", label=label, closed=closed) ).mean() got = gdf.groupby( cudf.Grouper(key="Publish date", freq="1h", label=label, closed=closed) ).mean() assert_eq(expect, got, check_like=True, check_dtype=False) @pytest.mark.parametrize("label", [None, "left", "right"]) @pytest.mark.parametrize("closed", [None, "left", "right"]) def test_groupby_freq_s(label, closed): pdf = pd.DataFrame( { "Publish date": [ pd.Timestamp("2000-01-01 00:00:02"), pd.Timestamp("2000-01-01 00:00:07"), pd.Timestamp("2000-01-01 00:00:02"), pd.Timestamp("2000-01-02 00:00:15"), pd.Timestamp("2000-01-01 00:00:05"), pd.Timestamp("2000-01-02 00:00:09"), ], "ID": [0, 1, 2, 3, 4, 5], "Price": [10, 20, 30, 40, 50, 60], } ) gdf = cudf.from_pandas(pdf) expect = pdf.groupby( pd.Grouper(key="Publish date", freq="3s", label=label, closed=closed) 
).mean() got = gdf.groupby( cudf.Grouper(key="Publish date", freq="3s", label=label, closed=closed) ).mean() assert_eq(expect, got, check_like=True, check_dtype=False) @pytest.mark.parametrize( "pdf, group, name, obj", [ ( pd.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]}), "X", "A", None, ), ( pd.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]}), "X", "B", None, ), ( pd.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]}), "X", "A", pd.DataFrame({"a": [1, 2, 4, 5, 10, 11]}), ), ( pd.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]}), "Y", 1, pd.DataFrame({"a": [1, 2, 4, 5, 10, 11]}), ), ( pd.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]}), "Y", 3, pd.DataFrame({"a": [1, 2, 0, 11]}), ), ], ) def test_groupby_get_group(pdf, group, name, obj): gdf = cudf.from_pandas(pdf) if isinstance(obj, pd.DataFrame): gobj = cudf.from_pandas(obj) else: gobj = obj expected = pdf.groupby(group).get_group(name=name, obj=obj) actual = gdf.groupby(group).get_group(name=name, obj=gobj) assert_groupby_results_equal(expected, actual) @pytest.mark.parametrize( "by", [ "a", ["a", "b"], pd.Series([2, 1, 1, 2, 2]), pd.Series(["b", "a", "a", "b", "b"]), ], ) @pytest.mark.parametrize("agg", ["sum", "mean", lambda df: df.mean()]) def test_groupby_transform_aggregation(by, agg): gdf = cudf.DataFrame( {"a": [2, 2, 1, 2, 1], "b": [1, 1, 1, 2, 2], "c": [1, 2, 3, 4, 5]} ) pdf = gdf.to_pandas() expected = pdf.groupby(by).transform(agg) actual = gdf.groupby(by).transform(agg) assert_groupby_results_equal(expected, actual) def test_groupby_select_then_ffill(): pdf = pd.DataFrame( { "a": [1, 1, 1, 2, 2], "b": [1, None, None, 2, None], "c": [3, None, None, 4, None], } ) gdf = cudf.from_pandas(pdf) expected = pdf.groupby("a")["c"].ffill() actual = gdf.groupby("a")["c"].ffill() assert_groupby_results_equal(expected, actual) def test_groupby_select_then_shift(): pdf = pd.DataFrame( {"a": [1, 1, 1, 2, 2], "b": [1, 2, 3, 4, 5], "c": [3, 4, 5, 6, 7]} ) gdf = cudf.from_pandas(pdf) expected = pdf.groupby("a")["c"].shift(1) actual = gdf.groupby("a")["c"].shift(1) assert_groupby_results_equal(expected, actual) def test_groupby_select_then_diff(): pdf = pd.DataFrame( {"a": [1, 1, 1, 2, 2], "b": [1, 2, 3, 4, 5], "c": [3, 4, 5, 6, 7]} ) gdf = cudf.from_pandas(pdf) expected = pdf.groupby("a")["c"].diff(1) actual = gdf.groupby("a")["c"].diff(1) assert_groupby_results_equal(expected, actual) # TODO: Add a test including datetime64[ms] column in input data @pytest.mark.parametrize("by", ["a", ["a", "b"], pd.Series([1, 2, 1, 3])]) def test_groupby_transform_maintain_index(by): # test that we maintain the index after a groupby transform gdf = cudf.DataFrame( {"a": [1, 1, 1, 2], "b": [1, 2, 1, 2]}, index=[3, 2, 1, 0] ) pdf = gdf.to_pandas() assert_groupby_results_equal( pdf.groupby(by).transform("max"), gdf.groupby(by).transform("max") ) @pytest.mark.parametrize( "data, gkey", [ ( { "id": ["a", "a", "a", "b", "b", "b", "c", "c", "c"], "val1": [5, 4, 6, 4, 8, 7, 4, 5, 2], "val2": [4, 5, 6, 1, 2, 9, 8, 5, 1], "val3": [4, 5, 6, 1, 2, 9, 8, 5, 1], }, ["id"], ), ( { "id": [0, 0, 0, 0, 1, 1, 1], "a": [1, 3, 4, 2.0, -3.0, 9.0, 10.0], "b": [10.0, 23, -4.0, 2, -3.0, None, 19.0], }, ["id", "a"], ), ( { "id": ["a", "a", "b", "b", "c", "c"], "val1": [None, None, None, None, None, None], }, ["id"], ), ], ) @pytest.mark.parametrize("periods", [-5, -2, 0, 2, 5]) @pytest.mark.parametrize("fill_method", ["ffill", "bfill", "pad", "backfill"]) def test_groupby_pct_change(data, gkey, periods, fill_method): gdf = 
cudf.DataFrame(data) pdf = gdf.to_pandas() with expect_warning_if(fill_method in ("pad", "backfill")): actual = gdf.groupby(gkey).pct_change( periods=periods, fill_method=fill_method ) with expect_warning_if(fill_method in ("pad", "backfill")): expected = pdf.groupby(gkey).pct_change( periods=periods, fill_method=fill_method ) assert_eq(expected, actual) @pytest.mark.xfail(reason="https://github.com/rapidsai/cudf/issues/11259") @pytest.mark.parametrize("periods", [-5, 5]) def test_groupby_pct_change_multiindex_dataframe(periods): gdf = cudf.DataFrame( { "a": [1, 1, 2, 2], "b": [1, 1, 2, 3], "c": [2, 3, 4, 5], "d": [6, 8, 9, 1], } ).set_index(["a", "b"]) actual = gdf.groupby(level=["a", "b"]).pct_change(periods) expected = gdf.to_pandas().groupby(level=["a", "b"]).pct_change(periods) assert_eq(expected, actual) def test_groupby_pct_change_empty_columns(): gdf = cudf.DataFrame(columns=["id", "val1", "val2"]) pdf = gdf.to_pandas() actual = gdf.groupby("id").pct_change() expected = pdf.groupby("id").pct_change() assert_eq(expected, actual) @pytest.mark.parametrize( "group_keys", [ None, pytest.param( True, marks=pytest.mark.xfail( condition=not PANDAS_GE_150, reason="https://github.com/pandas-dev/pandas/pull/34998", ), ), False, ], ) @pytest.mark.parametrize("by", ["A", ["A", "B"]]) def test_groupby_group_keys(group_keys, by): gdf = cudf.DataFrame( { "A": "a a a a b b".split(), "B": [1, 1, 2, 2, 3, 3], "C": [4, 6, 5, 9, 8, 7], } ) pdf = gdf.to_pandas() g_group = gdf.groupby(by, group_keys=group_keys) p_group = pdf.groupby(by, group_keys=group_keys) actual = g_group[["B", "C"]].apply(lambda x: x / x.sum()) expected = p_group[["B", "C"]].apply(lambda x: x / x.sum()) assert_eq(actual, expected) @pytest.fixture def df_ngroup(): df = cudf.DataFrame( { "a": [2, 2, 1, 1, 2, 3], "b": [1, 2, 1, 2, 1, 2], "c": ["a", "a", "b", "c", "d", "c"], }, index=[1, 3, 5, 7, 4, 2], ) df.index.name = "foo" return df @pytest.mark.parametrize( "by", [ lambda: "a", lambda: "b", lambda: ["a", "b"], lambda: "c", lambda: pd.Series([1, 2, 1, 2, 1, 2]), lambda: pd.Series(["x", "y", "y", "x", "z", "x"]), ], ) @pytest.mark.parametrize("ascending", [True, False]) def test_groupby_ngroup(by, ascending, df_ngroup): by = by() expected = df_ngroup.to_pandas().groupby(by).ngroup(ascending=ascending) actual = df_ngroup.groupby(by).ngroup(ascending=ascending) assert_eq(expected, actual, check_dtype=False) @pytest.mark.parametrize( "groups", ["a", "b", "c", ["a", "c"], ["a", "b", "c"]] ) def test_groupby_dtypes(groups): df = cudf.DataFrame( {"a": [1, 2, 3, 3], "b": ["x", "y", "z", "a"], "c": [10, 11, 12, 12]} ) pdf = df.to_pandas() assert_eq(pdf.groupby(groups).dtypes, df.groupby(groups).dtypes) @pytest.mark.parametrize("index_names", ["a", "b", "c", ["b", "c"]]) def test_groupby_by_index_names(index_names): gdf = cudf.DataFrame( {"a": [1, 2, 3, 4], "b": ["a", "b", "a", "a"], "c": [1, 1, 2, 1]} ).set_index(index_names) pdf = gdf.to_pandas() assert_groupby_results_equal( pdf.groupby(index_names).min(), gdf.groupby(index_names).min() ) @pytest.mark.parametrize( "groups", ["a", "b", "c", ["a", "c"], ["a", "b", "c"]] ) def test_group_by_pandas_compat(groups): with cudf.option_context("mode.pandas_compatible", True): df = cudf.DataFrame( { "a": [1, 3, 2, 3, 3], "b": ["x", "a", "y", "z", "a"], "c": [10, 13, 11, 12, 12], } ) pdf = df.to_pandas() assert_eq(pdf.groupby(groups).max(), df.groupby(groups).max()) class TestSample: @pytest.fixture(params=["default", "rangeindex", "intindex", "strindex"]) def index(self, request): n = 12 if 
request.param == "rangeindex": return cudf.RangeIndex(2, n + 2) elif request.param == "intindex": return cudf.Index( [2, 3, 4, 1, 0, 5, 6, 8, 7, 9, 10, 13], dtype="int32" ) elif request.param == "strindex": return cudf.Index(list(string.ascii_lowercase[:n])) elif request.param == "default": return None @pytest.fixture( params=[ ["a", "a", "b", "b", "c", "c", "c", "d", "d", "d", "d", "d"], [1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4], ], ids=["str-group", "int-group"], ) def df(self, index, request): return cudf.DataFrame( {"a": request.param, "b": request.param, "v": request.param}, index=index, ) @pytest.fixture(params=["a", ["a", "b"]], ids=["single-col", "two-col"]) def by(self, request): return request.param def expected(self, df, *, n=None, frac=None): value_counts = collections.Counter(df.a.values_host) if n is not None: values = list( itertools.chain.from_iterable( itertools.repeat(v, n) for v in value_counts.keys() ) ) elif frac is not None: values = list( itertools.chain.from_iterable( itertools.repeat(v, round(count * frac)) for v, count in value_counts.items() ) ) else: raise ValueError("Must provide either n or frac") values = cudf.Series(sorted(values), dtype=df.a.dtype) return cudf.DataFrame({"a": values, "b": values, "v": values}) @pytest.mark.parametrize("n", [None, 0, 1, 2]) def test_constant_n_no_replace(self, df, by, n): result = df.groupby(by).sample(n=n).sort_values("a") n = 1 if n is None else n assert_eq(self.expected(df, n=n), result.reset_index(drop=True)) def test_constant_n_no_replace_too_large_raises(self, df): with pytest.raises(ValueError): df.groupby("a").sample(n=3) @pytest.mark.parametrize("n", [1, 2, 3]) def test_constant_n_replace(self, df, by, n): result = df.groupby(by).sample(n=n, replace=True).sort_values("a") assert_eq(self.expected(df, n=n), result.reset_index(drop=True)) def test_invalid_arguments(self, df): with pytest.raises(ValueError): df.groupby("a").sample(n=1, frac=0.1) def test_not_implemented_arguments(self, df): with pytest.raises(NotImplementedError): # These are valid weights, but we don't implement this yet. df.groupby("a").sample(n=1, weights=[1 / len(df)] * len(df)) @pytest.mark.parametrize("frac", [0, 1 / 3, 1 / 2, 2 / 3, 1]) @pytest.mark.parametrize("replace", [False, True]) def test_fraction_rounding(self, df, by, frac, replace): result = ( df.groupby(by).sample(frac=frac, replace=replace).sort_values("a") ) assert_eq(self.expected(df, frac=frac), result.reset_index(drop=True)) class TestHeadTail: @pytest.fixture(params=[-3, -2, -1, 0, 1, 2, 3], ids=lambda n: f"{n=}") def n(self, request): return request.param @pytest.fixture( params=[False, True], ids=["no-preserve-order", "preserve-order"] ) def preserve_order(self, request): return request.param @pytest.fixture def df(self): return cudf.DataFrame( { "a": [1, 0, 1, 2, 2, 1, 3, 2, 3, 3, 3], "b": [0, 1, 2, 4, 3, 5, 6, 7, 9, 8, 10], } ) @pytest.fixture(params=[True, False], ids=["head", "tail"]) def take_head(self, request): return request.param @pytest.fixture def expected(self, df, n, take_head, preserve_order): if n == 0: # We'll get an empty dataframe in this case return df._empty_like(keep_index=True) else: if preserve_order: # Should match pandas here g = df.to_pandas().groupby("a") if take_head: return g.head(n=n) else: return g.tail(n=n) else: # We groupby "a" which is the first column. This # possibly relies on an implementation detail that for # integer group keys, cudf produces groups in sorted # (ascending) order. 
keyfunc = operator.itemgetter(0) if take_head or n == 0: # Head does group[:n] as does tail for n == 0 slicefunc = operator.itemgetter(slice(None, n)) else: # Tail does group[-n:] except when n == 0 slicefunc = operator.itemgetter( slice(-n, None) if n else slice(0) ) values_to_sort = np.hstack( [df.values_host, np.arange(len(df)).reshape(-1, 1)] ) expect_a, expect_b, index = zip( *itertools.chain.from_iterable( slicefunc(list(group)) for _, group in itertools.groupby( sorted(values_to_sort.tolist(), key=keyfunc), key=keyfunc, ) ) ) return cudf.DataFrame( {"a": expect_a, "b": expect_b}, index=index ) def test_head_tail(self, df, n, take_head, expected, preserve_order): if take_head: actual = df.groupby("a").head(n=n, preserve_order=preserve_order) else: actual = df.groupby("a").tail(n=n, preserve_order=preserve_order) assert_eq(actual, expected) def test_head_tail_empty(): # GH #13397 values = [1, 2, 3] pdf = pd.DataFrame({}, index=values) df = cudf.DataFrame({}, index=values) expected = pdf.groupby(pd.Series(values)).head() got = df.groupby(cudf.Series(values)).head() assert_eq(expected, got) expected = pdf.groupby(pd.Series(values)).tail() got = df.groupby(cudf.Series(values)).tail() assert_eq(expected, got) @pytest.mark.parametrize( "groups", ["a", "b", "c", ["a", "c"], ["a", "b", "c"]] ) @pytest.mark.parametrize("sort", [True, False]) def test_group_by_pandas_sort_order(groups, sort): with cudf.option_context("mode.pandas_compatible", True): df = cudf.DataFrame( { "a": [10, 1, 10, 3, 2, 1, 3, 3], "b": [5, 6, 7, 1, 2, 3, 4, 9], "c": [20, 20, 10, 11, 13, 11, 12, 12], } ) pdf = df.to_pandas() assert_eq( pdf.groupby(groups, sort=sort).sum(), df.groupby(groups, sort=sort).sum(), ) @pytest.mark.parametrize( "dtype", ["int32", "int64", "float64", "datetime64[ns]", "timedelta64[ns]", "bool"], ) @pytest.mark.parametrize( "reduce_op", [ "min", "max", "idxmin", "idxmax", "first", "last", ], ) def test_group_by_empty_reduction(dtype, reduce_op): gdf = cudf.DataFrame({"a": [], "b": [], "c": []}, dtype=dtype) pdf = gdf.to_pandas() gg = gdf.groupby("a")["c"] pg = pdf.groupby("a")["c"] assert_eq( getattr(gg, reduce_op)(), getattr(pg, reduce_op)(), check_dtype=True ) @pytest.mark.parametrize( "dtype", ["int32", "int64", "float64", "datetime64[ns]", "timedelta64[ns]", "bool"], ) @pytest.mark.parametrize( "apply_op", ["sum", "min", "max", "idxmax"], ) def test_group_by_empty_apply(request, dtype, apply_op): request.applymarker( pytest.mark.xfail( condition=(dtype == "datetime64[ns]" and apply_op == "sum"), reason=("sum isn't supported for datetime64[ns]"), ) ) gdf = cudf.DataFrame({"a": [], "b": [], "c": []}, dtype=dtype) pdf = gdf.to_pandas() gg = gdf.groupby("a")["c"] pg = pdf.groupby("a")["c"] assert_eq( gg.apply(apply_op), pg.apply(apply_op), check_dtype=True, check_index_type=True, ) def test_groupby_consecutive_operations(): df = cudf.DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=["A", "B"]) pdf = df.to_pandas() gg = df.groupby("A") pg = pdf.groupby("A") actual = gg.nth(-1) expected = pg.nth(-1) assert_groupby_results_equal(actual, expected, check_dtype=False) actual = gg.nth(0) expected = pg.nth(0) assert_groupby_results_equal(actual, expected, check_dtype=False) actual = gg.cumsum() expected = pg.cumsum() assert_groupby_results_equal(actual, expected, check_dtype=False) actual = gg.cumcount() expected = pg.cumcount() assert_groupby_results_equal(actual, expected, check_dtype=False) actual = gg.cumsum() expected = pg.cumsum() assert_groupby_results_equal(actual, expected, check_dtype=False) def 
test_categorical_grouping_pandas_compatibility(): gdf = cudf.DataFrame( { "key": cudf.Series([2, 1, 3, 1, 1], dtype="category"), "a": [0, 1, 3, 2, 3], } ) pdf = gdf.to_pandas() with cudf.option_context("mode.pandas_compatible", True): actual = gdf.groupby("key", sort=False).sum() expected = pdf.groupby("key", sort=False).sum() assert_eq(actual, expected) @pytest.mark.parametrize("normalize", [True, False]) @pytest.mark.parametrize("sort", [True, False]) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("dropna", [True, False]) @pytest.mark.parametrize("as_index", [True, False]) def test_group_by_value_counts(normalize, sort, ascending, dropna, as_index): # From Issue#12789 df = cudf.DataFrame( { "gender": ["male", "male", "female", "male", "female", "male"], "education": ["low", "medium", np.nan, "low", "high", "low"], "country": ["US", "FR", "US", "FR", "FR", "FR"], } ) pdf = df.to_pandas() actual = df.groupby("gender", as_index=as_index).value_counts( normalize=normalize, sort=sort, ascending=ascending, dropna=dropna ) expected = pdf.groupby("gender", as_index=as_index).value_counts( normalize=normalize, sort=sort, ascending=ascending, dropna=dropna ) # TODO: Remove `check_names=False` once testing against `pandas>=2.0.0` assert_groupby_results_equal( actual, expected, check_names=False, check_index_type=False ) def test_group_by_value_counts_subset(): # From Issue#12789 df = cudf.DataFrame( { "gender": ["male", "male", "female", "male", "female", "male"], "education": ["low", "medium", "high", "low", "high", "low"], "country": ["US", "FR", "US", "FR", "FR", "FR"], } ) pdf = df.to_pandas() actual = df.groupby("gender").value_counts(["education"]) expected = pdf.groupby("gender").value_counts(["education"]) # TODO: Remove `check_names=False` once testing against `pandas>=2.0.0` assert_groupby_results_equal( actual, expected, check_names=False, check_index_type=False ) def test_group_by_value_counts_clash_with_subset(): df = cudf.DataFrame({"a": [1, 5, 3], "b": [2, 5, 2]}) with pytest.raises(ValueError): df.groupby("a").value_counts(["a"]) def test_group_by_value_counts_subset_not_exists(): df = cudf.DataFrame({"a": [1, 5, 3], "b": [2, 5, 2]}) with pytest.raises(ValueError): df.groupby("a").value_counts(["c"]) def test_group_by_value_counts_with_count_column(): df = cudf.DataFrame({"a": [1, 5, 3], "count": [2, 5, 2]}) with pytest.raises(ValueError): df.groupby("a", as_index=False).value_counts()
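

# A minimal, self-contained sketch (underscore-prefixed so pytest does not
# collect it) of the comparison pattern used throughout this file: build the
# same frame in pandas and cudf, run the identical groupby reduction on
# both, and compare.  The helper name is illustrative, not part of cudf.
def _demo_groupby_comparison_pattern():
    pdf = pd.DataFrame({"key": [1, 2, 1, 2], "val": [10, 20, 30, 40]})
    gdf = cudf.from_pandas(pdf)
    expect = pdf.groupby("key").sum()
    got = gdf.groupby("key").sum()
    # assert_groupby_results_equal can sort both results before comparing
    # (note the explicit sort=False overrides elsewhere in this file), so
    # cudf's hash-based group ordering does not cause spurious failures.
    assert_groupby_results_equal(expect, got)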

# ===========================================================================
# rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_contains.py
# ===========================================================================

# Copyright (c) 2019-2022, NVIDIA CORPORATION. import datetime import numpy as np import pandas as pd import pytest import cudf from cudf import Series from cudf.core.index import RangeIndex, as_index from cudf.testing._utils import ( DATETIME_TYPES, NUMERIC_TYPES, TIMEDELTA_TYPES, assert_eq, ) def cudf_date_series(start, stop, freq): return Series(pd.date_range(start, stop, freq=freq, name="times")) def cudf_num_series(start, stop, step=1): return Series(range(start, stop, step)) def get_categorical_series(): return Series( pd.Categorical( ["ab", "ac", "cd", "ab", "cd"], categories=["ab", "ac", "cd"] ) ) def get_string_series(): return Series(["ab", "ac", "ba", "cc", "ad"]) # If the type being searched is different from type of series, exceptions # are thrown well within the python code, and needs to be handled. # Some of the test cases check this scenario. Example : String Vs Numerical testdata_all = [ ( cudf_date_series("20010101", "20020215", freq="400h"), datetime.datetime.strptime("2001-01-01", "%Y-%m-%d"), True, ), ( cudf_date_series("20010101", "20020215", freq="400h"), datetime.datetime.strptime("2000-01-01", "%Y-%m-%d"), False, ), (cudf_date_series("20010101", "20020215", freq="400h"), 20000101, False), (get_categorical_series(), "cd", True), (get_categorical_series(), "dc", False), (get_categorical_series(), "c", False), (get_categorical_series(), "c", False), (get_categorical_series(), 1, False), (get_string_series(), "ac", True), (get_string_series(), "ca", False), (get_string_series(), "c", False), (get_string_series(), 97, False), (cudf_num_series(0, 100, 5), 60, True), (cudf_num_series(0, 100, 5), 71, False), (cudf_num_series(0, 100, 5), "a", False), ] @pytest.mark.parametrize("values, item, expected", testdata_all) def test_series_contains(values, item, expected): assert_eq(expected, item in Series(index=values)) @pytest.mark.parametrize("values, item, expected", testdata_all) def test_index_contains(values, item, expected): index = as_index(values) assert_eq(expected, item in index) def test_rangeindex_contains(): assert_eq(True, 9 in RangeIndex(start=0, stop=10, name="Index")) assert_eq(False, 10 in RangeIndex(start=0, stop=10, name="Index")) @pytest.mark.parametrize("dtype", NUMERIC_TYPES) def test_lists_contains(dtype): dtype = cudf.dtype(dtype) inner_data = np.array([1, 2, 3], dtype=dtype) data = Series([inner_data]) contained_scalar = inner_data.dtype.type(2) not_contained_scalar = inner_data.dtype.type(42) assert data.list.contains(contained_scalar)[0] assert not data.list.contains(not_contained_scalar)[0] @pytest.mark.parametrize("dtype", DATETIME_TYPES + TIMEDELTA_TYPES) def test_lists_contains_datetime(dtype): dtype = cudf.dtype(dtype) inner_data = np.array([1, 2, 3]) unit, _ = np.datetime_data(dtype) data = Series([inner_data]) contained_scalar = inner_data.dtype.type(2) not_contained_scalar = inner_data.dtype.type(42) assert data.list.contains(contained_scalar)[0] assert not data.list.contains(not_contained_scalar)[0] def test_lists_contains_bool(): data = Series([[True, True, True]]) contained_scalar = True not_contained_scalar = False assert data.list.contains(contained_scalar)[0] assert not data.list.contains(not_contained_scalar)[0]
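

# Illustrative sketch (underscore-prefixed so pytest does not collect it):
# ``.list.contains`` is evaluated row-wise against a scalar and returns one
# boolean per list row.  A minimal example, assuming a plain Python int is
# accepted as the search scalar here:
def _demo_list_contains_rowwise():
    data = Series([[1, 2], [3, 4], [2, 5]])
    mask = data.list.contains(2)
    # One boolean per row: the scalar 2 appears in rows 0 and 2 only.
    assert mask.values_host.tolist() == [True, False, True]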

# ===========================================================================
# rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_s3.py
# ===========================================================================

# Copyright (c) 2020-2023, NVIDIA CORPORATION. import os import socket from contextlib import contextmanager from io import BytesIO import numpy as np import pandas as pd import pyarrow.fs as pa_fs import pytest from fsspec.core import get_fs_token_paths import cudf from cudf.testing._utils import assert_eq moto = pytest.importorskip("moto", minversion="3.1.6") boto3 = pytest.importorskip("boto3") s3fs = pytest.importorskip("s3fs") ThreadedMotoServer = pytest.importorskip("moto.server").ThreadedMotoServer @pytest.fixture(scope="session") def endpoint_ip(): return "127.0.0.1" @pytest.fixture(scope="session") def endpoint_port(): # Return a free port per worker session. sock = socket.socket() sock.bind(("127.0.0.1", 0)) port = sock.getsockname()[1] sock.close() return port @contextmanager def ensure_safe_environment_variables(): """ Get a context manager to safely set environment variables All changes will be undone on close, hence environment variables set within this contextmanager will neither persist nor change global state. """ saved_environ = dict(os.environ) try: yield finally: os.environ.clear() os.environ.update(saved_environ) @pytest.fixture(scope="session") def s3_base(endpoint_ip, endpoint_port): """ Fixture to set up moto server in separate process """ with ensure_safe_environment_variables(): # Fake aws credentials exported to prevent botocore looking for # system aws credentials, https://github.com/spulec/moto/issues/1793 os.environ["AWS_ACCESS_KEY_ID"] = "foobar_key" os.environ["AWS_SECRET_ACCESS_KEY"] = "foobar_secret" os.environ["S3FS_LOGGING_LEVEL"] = "DEBUG" os.environ["AWS_SECURITY_TOKEN"] = "foobar_security_token" os.environ["AWS_SESSION_TOKEN"] = "foobar_session_token" os.environ["AWS_DEFAULT_REGION"] = "us-east-1" # Launching moto in server mode, i.e., as a separate process # with an S3 endpoint on localhost endpoint_uri = f"http://{endpoint_ip}:{endpoint_port}/" server = ThreadedMotoServer(ip_address=endpoint_ip, port=endpoint_port) server.start() yield endpoint_uri server.stop() @pytest.fixture() def s3so(endpoint_ip, endpoint_port): """ Returns s3 storage options to pass to fsspec """ endpoint_uri = f"http://{endpoint_ip}:{endpoint_port}/" return {"client_kwargs": {"endpoint_url": endpoint_uri}} @contextmanager def s3_context(s3_base, bucket, files=None): if files is None: files = {} with ensure_safe_environment_variables(): client = boto3.client("s3", endpoint_url=s3_base) client.create_bucket(Bucket=bucket, ACL="public-read-write") for f, data in files.items(): client.put_object(Bucket=bucket, Key=f, Body=data) yield s3fs.S3FileSystem(client_kwargs={"endpoint_url": s3_base}) for f, data in files.items(): try: client.delete_object(Bucket=bucket, Key=f) except Exception: pass @pytest.fixture def pdf(scope="module"): df = pd.DataFrame() df["Integer"] = np.array([2345, 11987, 9027, 9027]) df["Float"] = np.array([9.001, 8.343, 6, 2.781]) df["Integer2"] = np.array([2345, 106, 2088, 789277]) df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"]) df["Boolean"] = np.array([True, False, True, False]) return df @pytest.fixture def pdf_ext(scope="module"): size = 100 df = pd.DataFrame() df["Integer"] = np.array([i for i in range(size)]) df["List"] = [[i] for i in range(size)] df["Struct"] = [{"a": i} for i in range(size)] df["String"] = (["Alpha", "Beta", "Gamma", "Delta"] * (-(size // -4)))[ :size ] return df @pytest.mark.parametrize("bytes_per_thread", [32, 1024]) def test_read_csv(s3_base, s3so, pdf, bytes_per_thread): # Write to buffer fname = "test_csv_reader.csv" 
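    # The buffer is read back twice below: once through fsspec
    # (use_python_file_object=False) and once through an Arrow PythonFile
    # (use_python_file_object=True).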
bucket = "csv" buffer = pdf.to_csv(index=False) # Use fsspec file object with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}): got = cudf.read_csv( f"s3://{bucket}/{fname}", storage_options=s3so, bytes_per_thread=bytes_per_thread, use_python_file_object=False, ) assert_eq(pdf, got) # Use Arrow PythonFile object with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}): got = cudf.read_csv( f"s3://{bucket}/{fname}", storage_options=s3so, use_python_file_object=True, ) assert_eq(pdf, got) def test_read_csv_arrow_nativefile(s3_base, s3so, pdf): # Write to buffer fname = "test_csv_reader_arrow_nativefile.csv" bucket = "csv" buffer = pdf.to_csv(index=False) with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}): fs = pa_fs.S3FileSystem( endpoint_override=s3so["client_kwargs"]["endpoint_url"], ) with fs.open_input_file(f"{bucket}/{fname}") as fil: got = cudf.read_csv(fil) assert_eq(pdf, got) @pytest.mark.parametrize("bytes_per_thread", [32, 1024]) @pytest.mark.parametrize("use_python_file_object", [True, False]) def test_read_csv_byte_range( s3_base, s3so, pdf, bytes_per_thread, use_python_file_object ): # Write to buffer fname = "test_csv_reader_byte_range.csv" bucket = "csv" buffer = pdf.to_csv(index=False) # Use fsspec file object with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}): got = cudf.read_csv( f"s3://{bucket}/{fname}", storage_options=s3so, byte_range=(74, 73), bytes_per_thread=bytes_per_thread if not use_python_file_object else None, header=None, names=["Integer", "Float", "Integer2", "String", "Boolean"], use_python_file_object=use_python_file_object, ) assert_eq(pdf.iloc[-2:].reset_index(drop=True), got) @pytest.mark.parametrize("chunksize", [None, 3]) def test_write_csv(s3_base, s3so, pdf, chunksize): # Write to buffer fname = "test_csv_writer.csv" bucket = "csv" gdf = cudf.from_pandas(pdf) with s3_context(s3_base=s3_base, bucket=bucket) as s3fs: gdf.to_csv( f"s3://{bucket}/{fname}", index=False, chunksize=chunksize, storage_options=s3so, ) assert s3fs.exists(f"s3://{bucket}/{fname}") # TODO: Update to use `storage_options` from pandas v1.2.0 got = pd.read_csv(s3fs.open(f"s3://{bucket}/{fname}")) assert_eq(pdf, got) @pytest.mark.parametrize("bytes_per_thread", [32, 1024]) @pytest.mark.parametrize("columns", [None, ["Float", "String"]]) @pytest.mark.parametrize("precache", [None, "parquet"]) @pytest.mark.parametrize("use_python_file_object", [True, False]) def test_read_parquet( s3_base, s3so, pdf, bytes_per_thread, columns, precache, use_python_file_object, ): fname = "test_parquet_reader.parquet" bucket = "parquet" buffer = BytesIO() pdf.to_parquet(path=buffer) # Check direct path handling buffer.seek(0) with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}): got1 = cudf.read_parquet( f"s3://{bucket}/{fname}", open_file_options=( {"precache_options": {"method": precache}} if use_python_file_object else None ), storage_options=s3so, bytes_per_thread=bytes_per_thread, columns=columns, use_python_file_object=use_python_file_object, ) expect = pdf[columns] if columns else pdf assert_eq(expect, got1) # Check fsspec file-object handling buffer.seek(0) with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}): fs = get_fs_token_paths( f"s3://{bucket}/{fname}", storage_options=s3so )[0] with fs.open(f"s3://{bucket}/{fname}", mode="rb") as f: got2 = cudf.read_parquet( f, bytes_per_thread=bytes_per_thread, columns=columns, use_python_file_object=use_python_file_object, ) assert_eq(expect, got2) 
@pytest.mark.parametrize("bytes_per_thread", [32, 1024])
@pytest.mark.parametrize("columns", [None, ["List", "Struct"]])
@pytest.mark.parametrize("index", [None, "Integer"])
def test_read_parquet_ext(
    s3_base,
    s3so,
    pdf_ext,
    bytes_per_thread,
    columns,
    index,
):
    fname = "test_parquet_reader_ext.parquet"
    bucket = "parquet"
    buffer = BytesIO()

    if index:
        pdf_ext.set_index(index).to_parquet(path=buffer)
    else:
        pdf_ext.to_parquet(path=buffer)

    # Check direct path handling
    buffer.seek(0)
    with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}):
        got1 = cudf.read_parquet(
            f"s3://{bucket}/{fname}",
            storage_options=s3so,
            bytes_per_thread=bytes_per_thread,
            columns=columns,
        )
    if index:
        expect = (
            pdf_ext.set_index(index)[columns]
            if columns
            else pdf_ext.set_index(index)
        )
    else:
        expect = pdf_ext[columns] if columns else pdf_ext
    assert_eq(expect, got1)


def test_read_parquet_multi_file(s3_base, s3so, pdf):
    fname_1 = "test_parquet_reader_multi_file_1.parquet"
    buffer_1 = BytesIO()
    pdf.to_parquet(path=buffer_1)
    buffer_1.seek(0)

    fname_2 = "test_parquet_reader_multi_file_2.parquet"
    buffer_2 = BytesIO()
    pdf.to_parquet(path=buffer_2)
    buffer_2.seek(0)

    bucket = "parquet"
    with s3_context(
        s3_base=s3_base,
        bucket=bucket,
        files={
            fname_1: buffer_1,
            fname_2: buffer_2,
        },
    ):
        got = cudf.read_parquet(
            [
                f"s3://{bucket}/{fname_1}",
                f"s3://{bucket}/{fname_2}",
            ],
            storage_options=s3so,
        ).reset_index(drop=True)

    expect = pd.concat([pdf, pdf], ignore_index=True)
    assert_eq(expect, got)


@pytest.mark.parametrize("columns", [None, ["Float", "String"]])
def test_read_parquet_arrow_nativefile(s3_base, s3so, pdf, columns):
    # Write to buffer
    fname = "test_parquet_reader_arrow_nativefile.parquet"
    bucket = "parquet"
    buffer = BytesIO()
    pdf.to_parquet(path=buffer)
    buffer.seek(0)
    with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}):
        fs = pa_fs.S3FileSystem(
            endpoint_override=s3so["client_kwargs"]["endpoint_url"],
        )
        with fs.open_input_file(f"{bucket}/{fname}") as fil:
            got = cudf.read_parquet(fil, columns=columns)

    expect = pdf[columns] if columns else pdf
    assert_eq(expect, got)


@pytest.mark.parametrize("precache", [None, "parquet"])
def test_read_parquet_filters(s3_base, s3so, pdf_ext, precache):
    fname = "test_parquet_reader_filters.parquet"
    bucket = "parquet"
    buffer = BytesIO()
    pdf_ext.to_parquet(path=buffer)
    buffer.seek(0)
    filters = [("String", "==", "Omega")]
    with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}):
        got = cudf.read_parquet(
            f"s3://{bucket}/{fname}",
            storage_options=s3so,
            filters=filters,
            open_file_options={"precache_options": {"method": precache}},
        )

    # All row-groups should be filtered out
    assert_eq(pdf_ext.iloc[:0], got.reset_index(drop=True))


@pytest.mark.parametrize("partition_cols", [None, ["String"]])
def test_write_parquet(s3_base, s3so, pdf, partition_cols):
    fname_cudf = "test_parquet_writer_cudf"
    fname_pandas = "test_parquet_writer_pandas"
    bucket = "parquet"
    gdf = cudf.from_pandas(pdf)

    with s3_context(s3_base=s3_base, bucket=bucket) as s3fs:
        gdf.to_parquet(
            f"s3://{bucket}/{fname_cudf}",
            partition_cols=partition_cols,
            storage_options=s3so,
        )
        assert s3fs.exists(f"s3://{bucket}/{fname_cudf}")
        pdf.to_parquet(
            f"s3://{bucket}/{fname_pandas}",
            partition_cols=partition_cols,
            storage_options=s3so,
        )
        assert s3fs.exists(f"s3://{bucket}/{fname_pandas}")

        got = pd.read_parquet(
            f"s3://{bucket}/{fname_pandas}", storage_options=s3so
        )
        expect = cudf.read_parquet(
            f"s3://{bucket}/{fname_cudf}", storage_options=s3so
        )

    assert_eq(expect, got)


def test_read_json(s3_base, s3so):
    fname = "test_json_reader.json"
    bucket = "json"
    buffer = (
        '{"amount": 100, "name": "Alice"}\n'
        '{"amount": 200, "name": "Bob"}\n'
        '{"amount": 300, "name": "Charlie"}\n'
        '{"amount": 400, "name": "Dennis"}\n'
    )

    with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}):
        got = cudf.read_json(
            f"s3://{bucket}/{fname}",
            engine="cudf",
            orient="records",
            lines=True,
            storage_options=s3so,
        )

    expect = pd.read_json(buffer, lines=True)
    assert_eq(expect, got)


@pytest.mark.parametrize("use_python_file_object", [False, True])
@pytest.mark.parametrize("columns", [None, ["string1"]])
def test_read_orc(s3_base, s3so, datadir, use_python_file_object, columns):
    source_file = str(datadir / "orc" / "TestOrcFile.testSnappy.orc")
    fname = "test_orc_reader.orc"
    bucket = "orc"
    expect = pd.read_orc(source_file)

    with open(source_file, "rb") as f:
        buffer = f.read()

    with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}):
        got = cudf.read_orc(
            f"s3://{bucket}/{fname}",
            columns=columns,
            storage_options=s3so,
            use_python_file_object=use_python_file_object,
        )

    if columns:
        expect = expect[columns]
    assert_eq(expect, got)


@pytest.mark.parametrize("columns", [None, ["string1"]])
def test_read_orc_arrow_nativefile(s3_base, s3so, datadir, columns):
    source_file = str(datadir / "orc" / "TestOrcFile.testSnappy.orc")
    fname = "test_orc_reader.orc"
    bucket = "orc"
    expect = pd.read_orc(source_file)

    with open(source_file, "rb") as f:
        buffer = f.read()

    with s3_context(s3_base=s3_base, bucket=bucket, files={fname: buffer}):
        fs = pa_fs.S3FileSystem(
            endpoint_override=s3so["client_kwargs"]["endpoint_url"],
        )
        with fs.open_input_file(f"{bucket}/{fname}") as fil:
            got = cudf.read_orc(fil, columns=columns)

    if columns:
        expect = expect[columns]
    assert_eq(expect, got)


def test_write_orc(s3_base, s3so, pdf):
    fname = "test_orc_writer.orc"
    bucket = "orc"
    gdf = cudf.from_pandas(pdf)
    with s3_context(s3_base=s3_base, bucket=bucket) as s3fs:
        gdf.to_orc(f"s3://{bucket}/{fname}", storage_options=s3so)
        assert s3fs.exists(f"s3://{bucket}/{fname}")

        with s3fs.open(f"s3://{bucket}/{fname}") as f:
            got = pd.read_orc(f)

    assert_eq(pdf, got)


def test_write_chunked_parquet(s3_base, s3so):
    df1 = cudf.DataFrame({"b": [10, 11, 12], "a": [1, 2, 3]})
    df2 = cudf.DataFrame({"b": [20, 30, 50], "a": [3, 2, 1]})
    dirname = "chunked_writer_directory"
    bucket = "parquet"
    from cudf.io.parquet import ParquetDatasetWriter

    with s3_context(
        s3_base=s3_base, bucket=bucket, files={dirname: BytesIO()}
    ) as s3fs:
        with ParquetDatasetWriter(
            f"s3://{bucket}/{dirname}",
            partition_cols=["a"],
            storage_options=s3so,
        ) as cw:
            cw.write_table(df1)
            cw.write_table(df2)

        # TODO: Replace following workaround with:
        # expect = cudf.read_parquet(f"s3://{bucket}/{dirname}/",
        #                            storage_options=s3so)
        # after the following bug is fixed:
        # https://issues.apache.org/jira/browse/ARROW-16438

        dfs = []
        for folder in {"a=1", "a=2", "a=3"}:
            assert s3fs.exists(f"s3://{bucket}/{dirname}/{folder}")
            for file in s3fs.ls(f"s3://{bucket}/{dirname}/{folder}"):
                df = cudf.read_parquet("s3://" + file, storage_options=s3so)
                dfs.append(df)

        actual = cudf.concat(dfs).astype("int64")
        assert_eq(
            actual.sort_values(["b"]).reset_index(drop=True),
            cudf.concat([df1, df2]).sort_values(["b"]).reset_index(drop=True),
        )


def test_no_s3fs_on_cudf_import():
    import subprocess
    import sys

    output = subprocess.check_output(
        [
            sys.executable,
            "-c",
            "import cudf; import sys; print('pyarrow._s3fs' in sys.modules)",
        ],
        cwd="/",
    )
    assert output.strip() == b"False"
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_cuda_apply.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION.

"""
Test methods that apply a GPU kernel to a frame.
"""

import numpy as np
import pytest
from numba import cuda

from cudf import DataFrame
from cudf.testing._utils import assert_eq


@pytest.mark.parametrize("nelem", [1, 2, 64, 128, 129])
def test_df_apply_rows(nelem):
    def kernel(in1, in2, in3, out1, out2, extra1, extra2):
        for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
            out1[i] = extra2 * x - extra1 * y
            out2[i] = y - extra1 * z

    df = DataFrame()
    df["in1"] = in1 = np.arange(nelem)
    df["in2"] = in2 = np.arange(nelem)
    df["in3"] = in3 = np.arange(nelem)

    extra1 = 2.3
    extra2 = 3.4

    expect_out1 = extra2 * in1 - extra1 * in2
    expect_out2 = in2 - extra1 * in3

    outdf = df.apply_rows(
        kernel,
        incols=["in1", "in2", "in3"],
        outcols=dict(out1=np.float64, out2=np.float64),
        kwargs=dict(extra1=extra1, extra2=extra2),
    )

    got_out1 = outdf["out1"].to_numpy()
    got_out2 = outdf["out2"].to_numpy()
    np.testing.assert_array_almost_equal(got_out1, expect_out1)
    np.testing.assert_array_almost_equal(got_out2, expect_out2)


@pytest.mark.parametrize("nelem", [1, 2, 64, 128, 129])
@pytest.mark.parametrize("chunksize", [1, 2, 3, 4, 23])
def test_df_apply_chunks(nelem, chunksize):
    def kernel(in1, in2, in3, out1, out2, extra1, extra2):
        for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
            out1[i] = extra2 * x - extra1 * y + z
            out2[i] = i

    df = DataFrame()
    df["in1"] = in1 = np.arange(nelem)
    df["in2"] = in2 = np.arange(nelem)
    df["in3"] = in3 = np.arange(nelem)

    extra1 = 2.3
    extra2 = 3.4

    expect_out1 = extra2 * in1 - extra1 * in2 + in3
    expect_out2 = np.arange(len(df)) % chunksize

    outdf = df.apply_chunks(
        kernel,
        incols=["in1", "in2", "in3"],
        outcols=dict(out1=np.float64, out2=np.int32),
        kwargs=dict(extra1=extra1, extra2=extra2),
        chunks=chunksize,
    )

    got_out1 = outdf["out1"]
    got_out2 = outdf["out2"]
    np.testing.assert_array_almost_equal(got_out1.to_numpy(), expect_out1)
    np.testing.assert_array_almost_equal(got_out2.to_numpy(), expect_out2)


@pytest.mark.parametrize("nelem", [1, 15, 30, 64, 128, 129])
def test_df_apply_custom_chunks(nelem):
    def kernel(in1, in2, in3, out1, out2, extra1, extra2):
        for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
            out1[i] = extra2 * x - extra1 * y + z
            out2[i] = i

    df = DataFrame()
    df["in1"] = in1 = np.arange(nelem)
    df["in2"] = in2 = np.arange(nelem)
    df["in3"] = in3 = np.arange(nelem)

    chunks = [0, 7, 11, 29, 101, 777]
    chunks = [c for c in chunks if c < nelem]

    extra1 = 2.3
    extra2 = 3.4

    expect_out1 = extra2 * in1 - extra1 * in2 + in3
    expect_out2 = np.hstack(
        [np.arange(e - s) for s, e in zip(chunks, chunks[1:] + [len(df)])]
    )

    outdf = df.apply_chunks(
        kernel,
        incols=["in1", "in2", "in3"],
        outcols=dict(out1=np.float64, out2=np.int32),
        kwargs=dict(extra1=extra1, extra2=extra2),
        chunks=chunks,
    )

    got_out1 = outdf["out1"]
    got_out2 = outdf["out2"]
    np.testing.assert_array_almost_equal(got_out1.to_numpy(), expect_out1)
    np.testing.assert_array_almost_equal(got_out2.to_numpy(), expect_out2)


@pytest.mark.parametrize("nelem", [1, 15, 30, 64, 128, 129])
@pytest.mark.parametrize("blkct", [None, 1, 8])
@pytest.mark.parametrize("tpb", [1, 8, 64])
def test_df_apply_custom_chunks_blkct_tpb(nelem, blkct, tpb):
    def kernel(in1, in2, in3, out1, out2, extra1, extra2):
        for i in range(cuda.threadIdx.x, in1.size, cuda.blockDim.x):
            x = in1[i]
            y = in2[i]
            z = in3[i]
            out1[i] = extra2 * x - extra1 * y + z
            out2[i] = i * cuda.blockDim.x

    df = DataFrame()
    df["in1"] = in1 = np.arange(nelem)
    df["in2"] = in2 = np.arange(nelem)
    df["in3"] = in3 = np.arange(nelem)

    chunks = [0, 7, 11, 29, 101, 777]
    chunks = [c for c in chunks if c < nelem]

    extra1 = 2.3
    extra2 = 3.4

    expect_out1 = extra2 * in1 - extra1 * in2 + in3
    expect_out2 = np.hstack(
        [
            tpb * np.arange(e - s)
            for s, e in zip(chunks, chunks[1:] + [len(df)])
        ]
    )

    outdf = df.apply_chunks(
        kernel,
        incols=["in1", "in2", "in3"],
        outcols=dict(out1=np.float64, out2=np.int32),
        kwargs=dict(extra1=extra1, extra2=extra2),
        chunks=chunks,
        blkct=blkct,
        tpb=tpb,
    )

    got_out1 = outdf["out1"]
    got_out2 = outdf["out2"]
    np.testing.assert_array_almost_equal(got_out1.to_numpy(), expect_out1)
    np.testing.assert_array_almost_equal(got_out2.to_numpy(), expect_out2)


@pytest.mark.parametrize("nelem", [1, 2, 64, 128, 1000, 5000])
def test_df_apply_rows_incols_mapping(nelem):
    def kernel(x, y, z, out1, out2, extra1, extra2):
        for i, (a, b, c) in enumerate(zip(x, y, z)):
            out1[i] = extra2 * a - extra1 * b
            out2[i] = b - extra1 * c

    df = DataFrame()
    df["in1"] = in1 = np.arange(nelem)
    df["in2"] = in2 = np.arange(nelem)
    df["in3"] = in3 = np.arange(nelem)

    extra1 = 2.3
    extra2 = 3.4

    expected_out = DataFrame()
    expected_out["out1"] = extra2 * in1 - extra1 * in2
    expected_out["out2"] = in2 - extra1 * in3

    outdf = df.apply_rows(
        kernel,
        incols={"in1": "x", "in2": "y", "in3": "z"},
        outcols=dict(out1=np.float64, out2=np.float64),
        kwargs=dict(extra1=extra1, extra2=extra2),
    )

    assert_eq(outdf[["out1", "out2"]], expected_out)


@pytest.mark.parametrize("nelem", [1, 2, 64, 128, 129])
@pytest.mark.parametrize("chunksize", [1, 2, 3, 4, 23])
def test_df_apply_chunks_incols_mapping(nelem, chunksize):
    def kernel(q, p, r, out1, out2, extra1, extra2):
        for i, (a, b, c) in enumerate(zip(q, p, r)):
            out1[i] = extra2 * a - extra1 * b + c
            out2[i] = i

    df = DataFrame()
    df["in1"] = in1 = np.arange(nelem)
    df["in2"] = in2 = np.arange(nelem)
    df["in3"] = in3 = np.arange(nelem)

    extra1 = 2.3
    extra2 = 3.4

    expected_out = DataFrame()
    expected_out["out1"] = extra2 * in1 - extra1 * in2 + in3
    expected_out["out2"] = np.arange(len(df)) % chunksize

    outdf = df.apply_chunks(
        kernel,
        incols={"in1": "q", "in2": "p", "in3": "r"},
        outcols=dict(out1=np.float64, out2=np.int64),
        kwargs=dict(extra1=extra1, extra2=extra2),
        chunks=chunksize,
    )

    assert_eq(outdf[["out1", "out2"]], expected_out)
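

# Hedged sketch (not in the original file): apply_rows kernels are
# row-parallel, so each output element may depend only on the inputs at the
# same index. This mirrors test_df_apply_rows with a single identity column.
def test_df_apply_rows_identity_sketch():
    df = DataFrame({"in1": np.arange(8)})

    def kernel(in1, out1):
        for i, x in enumerate(in1):
            out1[i] = x  # pure elementwise copy

    outdf = df.apply_rows(
        kernel, incols=["in1"], outcols=dict(out1=np.int64), kwargs={}
    )
    np.testing.assert_array_equal(outdf["out1"].to_numpy(), np.arange(8))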
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_list.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION.

import functools
import operator

import numpy as np
import pandas as pd
import pyarrow as pa
import pytest

import cudf
from cudf import NA
from cudf._lib.copying import get_element
from cudf.api.types import is_scalar
from cudf.testing._utils import (
    DATETIME_TYPES,
    NUMERIC_TYPES,
    TIMEDELTA_TYPES,
    assert_eq,
)


@pytest.mark.parametrize(
    "data",
    [
        [[]],
        [[[]]],
        [[0]],
        [[0, 1]],
        [[0, 1], [2, 3]],
        [[[0, 1], [2]], [[3, 4]]],
        [[None]],
        [[[None]]],
        [[None], None],
        [[1, None], [1]],
        [[1, None], None],
        [[[1, None], None], None],
    ],
)
def test_create_list_series(data):
    expect = pd.Series(data)
    got = cudf.Series(data)
    assert_eq(expect, got)


@pytest.mark.parametrize(
    "data",
    [
        {"a": [[]]},
        {"a": [[None]]},
        {"a": [[1, 2, 3]]},
        {"a": [[1, 2, 3]], "b": [[2, 3, 4]]},
        {"a": [[1, 2, 3, None], [None]], "b": [[2, 3, 4], [5]], "c": None},
        {"a": [[1]], "b": [[1, 2, 3]]},
        pd.DataFrame({"a": [[1, 2, 3]]}),
    ],
)
def test_df_list_dtypes(data):
    expect = pd.DataFrame(data)
    got = cudf.DataFrame(data)
    assert_eq(expect, got)


@pytest.mark.parametrize(
    "data",
    [
        [[]],
        [[[]]],
        [[0]],
        [[0, 1]],
        [[0, 1], [2, 3]],
        [[[0, 1], [2]], [[3, 4]]],
        [[[0, 1, None], None], None, [[3, 2, None], None]],
        [[["a", "c", None], None], None, [["b", "d", None], None]],
    ],
)
def test_leaves(data):
    pa_array = pa.array(data)
    while hasattr(pa_array, "flatten"):
        pa_array = pa_array.flatten()

    expect = cudf.Series(pa_array)
    got = cudf.Series(data).list.leaves
    assert_eq(
        expect,
        got,
        check_dtype=not isinstance(pa_array, pa.NullArray),
    )


def test_list_to_pandas_nullable_true():
    df = cudf.DataFrame({"a": cudf.Series([[1, 2, 3]])})
    actual = df.to_pandas(nullable=True)
    expected = pd.DataFrame({"a": pd.Series([[1, 2, 3]])})
    assert_eq(actual, expected)


def test_listdtype_hash():
    a = cudf.core.dtypes.ListDtype("int64")
    b = cudf.core.dtypes.ListDtype("int64")

    assert hash(a) == hash(b)

    c = cudf.core.dtypes.ListDtype("int32")

    assert hash(a) != hash(c)


@pytest.fixture(params=["int", "float", "datetime", "timedelta"])
def leaf_value(request):
    if request.param == "int":
        return np.int32(1)
    elif request.param == "float":
        return np.float64(1)
    elif request.param == "datetime":
        return pd.to_datetime("1900-01-01")
    elif request.param == "timedelta":
        return pd.to_timedelta("10d")
    else:
        raise ValueError("Unhandled data type")


@pytest.fixture(params=["list", "struct"])
def list_or_struct(request, leaf_value):
    if request.param == "list":
        return [[leaf_value], [leaf_value]]
    elif request.param == "struct":
        return {"a": leaf_value, "b": [leaf_value], "c": {"d": [leaf_value]}}
    else:
        raise ValueError("Unhandled data type")


@pytest.fixture(params=["list", "struct"])
def nested_list(request, list_or_struct, leaf_value):
    if request.param == "list":
        return [list_or_struct, list_or_struct]
    elif request.param == "struct":
        return [
            {
                "a": list_or_struct,
                "b": leaf_value,
                "c": {"d": list_or_struct, "e": leaf_value},
            }
        ]
    else:
        raise ValueError("Unhandled data type")


def test_list_dtype_explode(nested_list):
    sr = cudf.Series([nested_list])
    assert sr.dtype.element_type == sr.explode().dtype


@pytest.mark.parametrize(
    "data",
    [
        [[]],
        [[1, 2, 3], [4, 5]],
        [[1, 2, 3], [], [4, 5]],
        [[1, 2, 3], None, [4, 5]],
        [[None, None], [None]],
        [[[[[[1, 2, 3]]]]]],
        cudf.Series([[1, 2]]).iloc[0:0],
        cudf.Series([None, [1, 2]]).iloc[0:1],
    ],
)
def test_len(data):
    gsr = cudf.Series(data)
    psr = gsr.to_pandas()

    expect = psr.map(lambda x: len(x) if x is not None else None)
    got = gsr.list.len()

    assert_eq(expect, got, check_dtype=False)


@pytest.mark.parametrize(
    ("data", "idx"),
    [
        ([[1, 2, 3], [3, 4, 5], [4, 5, 6]], [[0, 1], [2], [1, 2]]),
        ([[1, 2, 3], [3, 4, 5], [4, 5, 6]], [[1, 2, 0], [1, 0, 2], [0, 1, 2]]),
        ([[1, 2, 3], []], [[0, 1], []]),
        ([[1, 2, 3], [None]], [[0, 1], []]),
        ([[1, None, 3], None], [[0, 1], []]),
    ],
)
def test_take(data, idx):
    ps = pd.Series(data)
    gs = cudf.from_pandas(ps)

    expected = pd.Series(zip(ps, idx)).map(
        lambda x: [x[0][i] for i in x[1]] if x[0] is not None else None
    )
    got = gs.list.take(idx)
    assert_eq(expected, got)


@pytest.mark.parametrize(
    ("invalid", "exception"),
    [
        ([[0]], pytest.raises(ValueError, match="different size")),
        ([1, 2, 3, 4], pytest.raises(ValueError, match="should be list type")),
        (
            [["a", "b"], ["c"]],
            pytest.raises(
                TypeError, match="should be column of values of index types"
            ),
        ),
        (
            [[[1], [0]], [[0]]],
            pytest.raises(
                TypeError, match="should be column of values of index types"
            ),
        ),
        ([[0, 1], None], pytest.raises(ValueError, match="contains null")),
    ],
)
def test_take_invalid(invalid, exception):
    gs = cudf.Series([[0, 1], [2, 3]])
    with exception:
        gs.list.take(invalid)


@pytest.mark.parametrize(
    ("data", "expected"),
    [
        ([[1, 1, 2, 2], [], None, [3, 4, 5]], [[1, 2], [], None, [3, 4, 5]]),
        (
            [[1.233, np.nan, 1.234, 3.141, np.nan, 1.234]],
            [[1.233, 1.234, np.nan, 3.141]],
        ),  # duplicate nans
        ([[1, 1, 2, 2, None, None]], [[1, 2, None]]),  # duplicate nulls
        (
            [[1.233, np.nan, None, 1.234, 3.141, np.nan, 1.234, None]],
            [[1.233, 1.234, np.nan, None, 3.141]],
        ),  # duplicate nans and nulls
        ([[2, None, 1, None, 2]], [[1, 2, None]]),
        ([[], []], [[], []]),
        ([[], None], [[], None]),
    ],
)
def test_unique(data, expected):
    """
    Pandas de-duplicates nans and nulls respectively in Series.unique.
    `expected` is setup to mimic such behavior
    """
    gs = cudf.Series(data, nan_as_null=False)

    got = gs.list.unique()
    expected = cudf.Series(expected, nan_as_null=False).list.sort_values()

    got = got.list.sort_values()

    assert_eq(expected, got)


def key_func_builder(x, na_position):
    if x is None:
        if na_position == "first":
            return -1e8
        else:
            return 1e8
    else:
        return x


@pytest.mark.parametrize(
    "data",
    [
        [[4, 2, None, 9], [8, 8, 2], [2, 1]],
        [[4, 2, None, 9], [8, 8, 2], None],
        [[4, 2, None, 9], [], None],
    ],
)
@pytest.mark.parametrize(
    "index",
    [
        None,
        pd.Index(["a", "b", "c"]),
        pd.MultiIndex.from_tuples(
            [(0, "a"), (0, "b"), (1, "a")], names=["l0", "l1"]
        ),
    ],
)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("ignore_index", [True, False])
def test_sort_values(data, index, ascending, na_position, ignore_index):
    key_func = functools.partial(key_func_builder, na_position=na_position)

    ps = pd.Series(data, index=index)
    gs = cudf.from_pandas(ps)

    expected = ps.apply(
        lambda x: sorted(x, key=key_func, reverse=not ascending)
        if x is not None
        else None
    )
    if ignore_index:
        expected.reset_index(drop=True, inplace=True)
    got = gs.list.sort_values(
        ascending=ascending,
        na_position=na_position,
        ignore_index=ignore_index,
    )

    assert_eq(expected, got)


@pytest.mark.parametrize(
    "data, index, expect",
    [
        ([[None, None], [None, None]], 0, [None, None]),
        ([[1, 2], [3, 4]], 0, [1, 3]),
        ([["a", "b"], ["c", "d"]], 1, ["b", "d"]),
        ([[1, None], [None, 2]], 1, [None, 2]),
        ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], 1, [[3, 4], [7, 8]]),
    ],
)
def test_get(data, index, expect):
    sr = cudf.Series(data)
    expect = cudf.Series(expect)
    got = sr.list.get(index)

    assert_eq(expect, got, check_dtype=not expect.isnull().all())


@pytest.mark.parametrize(
    "data",
    [
        [{"k": "v1"}, {"k": "v2"}],
        [[{"k": "v1", "b": "v2"}], [{"k": "v3", "b": "v4"}]],
        [
            [{"k": "v1", "b": [{"c": 10, "d": "v5"}]}],
            [{"k": "v3", "b": [{"c": 14, "d": "v6"}]}],
        ],
    ],
)
@pytest.mark.parametrize("index", [0, 1])
def test_get_nested_struct_dtype_transfer(data, index):
    sr = cudf.Series([data])
    expect = cudf.Series(data[index : index + 1])
    assert_eq(expect, sr.list.get(index))


def test_get_nested_lists():
    sr = cudf.Series(
        [
            [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [], [[3, 4], [7, 8]]],
            [[], [[9, 10]], [[11, 12], [13, 14]]],
        ]
    )
    expect = cudf.Series([[[1, 2], [3, 4]], []])
    got = sr.list.get(0)
    assert_eq(expect, got)


def test_get_default():
    sr = cudf.Series([[1, 2], [3, 4, 5], [6, 7, 8, 9]])

    assert_eq(cudf.Series([cudf.NA, 5, 8]), sr.list.get(2))
    assert_eq(cudf.Series([cudf.NA, 5, 8]), sr.list.get(2, default=cudf.NA))
    assert_eq(cudf.Series([0, 5, 8]), sr.list.get(2, default=0))
    assert_eq(cudf.Series([0, 3, 7]), sr.list.get(-3, default=0))
    assert_eq(cudf.Series([2, 5, 9]), sr.list.get(-1))

    string_sr = cudf.Series(
        [["apple", "banana"], ["carrot", "daffodil", "elephant"]]
    )
    assert_eq(
        cudf.Series(["default", "elephant"]),
        string_sr.list.get(2, default="default"),
    )

    sr_with_null = cudf.Series([[0, cudf.NA], [1]])
    assert_eq(cudf.Series([cudf.NA, 0]), sr_with_null.list.get(1, default=0))

    sr_nested = cudf.Series([[[1, 2], [3, 4], [5, 6]], [[5, 6], [7, 8]]])
    assert_eq(cudf.Series([[3, 4], [7, 8]]), sr_nested.list.get(1))
    assert_eq(cudf.Series([[5, 6], cudf.NA]), sr_nested.list.get(2))
    assert_eq(
        cudf.Series([[5, 6], [0, 0]]), sr_nested.list.get(2, default=[0, 0])
    )


def test_get_ind_sequence():
    # test .list.get() when `index` is a sequence
    sr = cudf.Series([[1, 2], [3, 4, 5], [6, 7, 8, 9]])
    assert_eq(cudf.Series([1, 4, 8]), sr.list.get([0, 1, 2]))
    assert_eq(cudf.Series([1, 4, 8]), sr.list.get(cudf.Series([0, 1, 2])))
    assert_eq(cudf.Series([cudf.NA, 5, cudf.NA]), sr.list.get([2, 2, -5]))
    assert_eq(cudf.Series([0, 5, 0]), sr.list.get([2, 2, -5], default=0))
    sr_nested = cudf.Series([[[1, 2], [3, 4], [5, 6]], [[5, 6], [7, 8]]])
    assert_eq(cudf.Series([[1, 2], [7, 8]]), sr_nested.list.get([0, 1]))


@pytest.mark.parametrize(
    "data, scalar, expect",
    [
        ([[1, 2, 3], []], 1, [True, False]),
        ([[1, 2, 3], [], [3, 4, 5]], 6, [False, False, False]),
        ([[1.0, 2.0, 3.0], None, []], 2.0, [True, None, False]),
        ([[None, "b", "c"], [], ["b", "e", "f"]], "b", [True, False, True]),
        ([[None, 2, 3], None, []], 1, [False, None, False]),
        ([[None, "b", "c"], [], ["b", "e", "f"]], "d", [False, False, False]),
    ],
)
def test_contains_scalar(data, scalar, expect):
    sr = cudf.Series(data)
    expect = cudf.Series(expect)
    got = sr.list.contains(cudf.Scalar(scalar, sr.dtype.element_type))
    assert_eq(expect, got)


@pytest.mark.parametrize(
    "data, expect",
    [
        ([[1, 2, 3], []], [None, None]),
        ([[1.0, 2.0, 3.0], None, []], [None, None, None]),
        ([[None, 2, 3], [], None], [None, None, None]),
        ([[1, 2, 3], [3, 4, 5]], [None, None]),
        ([[], [], []], [None, None, None]),
    ],
)
def test_contains_null_search_key(data, expect):
    sr = cudf.Series(data)
    expect = cudf.Series(expect, dtype="bool")
    got = sr.list.contains(cudf.Scalar(cudf.NA, sr.dtype.element_type))
    assert_eq(expect, got)


@pytest.mark.parametrize(
    "data, scalar",
    [
        ([[9, 0, 2], [], [1, None, 0]], "x"),
        ([["z", "y", None], None, [None, "x"]], 5),
    ],
)
def test_contains_invalid(data, scalar):
    sr = cudf.Series(data)
    with pytest.raises(
        TypeError,
        match="Type/Scale of search key does not "
        "match list column element type.",
    ):
        sr.list.contains(scalar)


@pytest.mark.parametrize(
    "data, search_key, expect",
    [
        ([[1, 2, 3], [], [3, 4, 5]], 3, [2, -1, 0]),
        ([[1.0, 2.0, 3.0], None, [2.0, 5.0]], 2.0, [1, None, 0]),
        ([[None, "b", "c"], [], ["b", "e", "f"]], "f", [-1, -1, 2]),
        ([[-5, None, 8], None, []], -5, [0, None, -1]),
        ([[None, "x", None, "y"], ["z", "i", "j"]], "y", [3, -1]),
        ([["h", "a", None], ["t", "g"]], ["a", "b"], [1, -1]),
        (
            [None, ["h", "i"], ["p", "k", "z"]],
            ["x", None, "z"],
            [None, None, 2],
        ),
        (
            [["d", None, "e"], [None, "f"], []],
            cudf.Scalar(cudf.NA, "O"),
            [None, None, None],
        ),
        (
            [None, [10, 9, 8], [5, 8, None]],
            cudf.Scalar(cudf.NA, "int64"),
            [None, None, None],
        ),
    ],
)
def test_index(data, search_key, expect):
    sr = cudf.Series(data)
    expect = cudf.Series(expect, dtype="int32")
    if is_scalar(search_key):
        got = sr.list.index(cudf.Scalar(search_key, sr.dtype.element_type))
    else:
        got = sr.list.index(
            cudf.Series(search_key, dtype=sr.dtype.element_type)
        )

    assert_eq(expect, got)


@pytest.mark.parametrize(
    "data, search_key",
    [
        ([[9, None, 8], [], [7, 6, 5]], "c"),
        ([["a", "b", "c"], None, [None, "d"]], 2),
        ([["e", "s"], ["t", "w"]], [5, 6]),
    ],
)
def test_index_invalid_type(data, search_key):
    sr = cudf.Series(data)
    with pytest.raises(
        TypeError,
        match="Type/Scale of search key does not "
        "match list column element type.",
    ):
        sr.list.index(search_key)


@pytest.mark.parametrize(
    "data, search_key",
    [
        ([[5, 8], [2, 6]], [8, 2, 4]),
        ([["h", "j"], ["p", None], ["t", "z"]], ["j", "a"]),
    ],
)
def test_index_invalid_length(data, search_key):
    sr = cudf.Series(data)
    with pytest.raises(
        RuntimeError,
        match="Number of search keys must match list column size.",
    ):
        sr.list.index(search_key)


@pytest.mark.parametrize(
    "row",
    [
        [[]],
        [[1]],
        [[1, 2]],
        [[1, 2], [3, 4, 5]],
        [[1, 2], [], [3, 4, 5]],
        [[1, 2, None], [3, 4, 5]],
        [[1, 2, None], None, [3, 4, 5]],
        [[1, 2, None], None, [], [3, 4, 5]],
        [[[1, 2], [3, 4]], [[5, 6, 7], [8, 9]]],
        [[["a", "c", "de", None], None, ["fg"]], [["abc", "de"], None]],
    ],
)
@pytest.mark.parametrize("dropna", [True, False])
def test_concat_elements(row, dropna):
    if any(x is None for x in row):
        if dropna:
            row = [x for x in row if x is not None]
            result = functools.reduce(operator.add, row)
        else:
            result = None
    else:
        result = functools.reduce(operator.add, row)

    expect = pd.Series([result])
    got = cudf.Series([row]).list.concat(dropna=dropna)
    assert_eq(expect, got)


def test_concat_elements_raise():
    s = cudf.Series([[1, 2, 3]])  # no nesting
    with pytest.raises(ValueError):
        s.list.concat()


def test_concatenate_rows_of_lists():
    pdf = pd.DataFrame({"val": [["a", "a"], ["b"], ["c"]]})
    gdf = cudf.from_pandas(pdf)

    expect = pdf["val"] + pdf["val"]
    got = gdf["val"] + gdf["val"]

    assert_eq(expect, got)


def test_concatenate_list_with_nonlist():
    with pytest.raises(TypeError):
        gdf1 = cudf.DataFrame({"A": [["a", "c"], ["b", "d"], ["c", "d"]]})
        gdf2 = cudf.DataFrame({"A": ["a", "b", "c"]})
        gdf1["A"] + gdf2["A"]


@pytest.mark.parametrize(
    "data",
    [
        [1],
        [1, 2, 3],
        [[1, 2, 3], [4, 5, 6]],
        [NA],
        [1, NA, 3],
        [[1, NA, 3], [NA, 5, 6]],
    ],
)
def test_list_getitem(data):
    list_sr = cudf.Series([data])
    assert list_sr[0] == data


@pytest.mark.parametrize(
    "data",
    [
        [1, 2, 3],
        [[1, 2, 3], [4, 5, 6]],
        ["a", "b", "c"],
        [["a", "b", "c"], ["d", "e", "f"]],
        [1.1, 2.2, 3.3],
        [[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]],
        [1, NA, 3],
        [[1, NA, 3], [4, 5, NA]],
        ["a", NA, "c"],
        [["a", NA, "c"], ["d", "e", NA]],
        [1.1, NA, 3.3],
        [[1.1, NA, 3.3], [4.4, 5.5, NA]],
    ],
)
def test_list_scalar_host_construction(data):
    slr = cudf.Scalar(data)
    assert slr.value == data
    assert slr.device_value.value == data
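

# Hedged sketch, not part of the original suite: list scalars round-trip
# between host and device representations, so `device_value` mirrors the
# host `value`. The payload is an arbitrary example like the cases above.
def test_list_scalar_roundtrip_sketch():
    payload = [[1, 2], [3]]  # assumed example data
    slr = cudf.Scalar(payload)
    # host -> device -> host preserves the nested structure
    assert slr.device_value.value == slr.value == payload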
@pytest.mark.parametrize( "elem_type", NUMERIC_TYPES + DATETIME_TYPES + TIMEDELTA_TYPES + ["str"] ) @pytest.mark.parametrize("nesting_level", [1, 2, 3]) def test_list_scalar_host_construction_null(elem_type, nesting_level): dtype = cudf.ListDtype(elem_type) for level in range(nesting_level - 1): dtype = cudf.ListDtype(dtype) slr = cudf.Scalar(None, dtype=dtype) assert slr.value is ( cudf.NaT if cudf.api.types.is_datetime64_dtype(slr.dtype) or cudf.api.types.is_timedelta64_dtype(slr.dtype) else cudf.NA ) @pytest.mark.parametrize( "data", [ [1, 2, 3], [[1, 2, 3], [4, 5, 6]], ["a", "b", "c"], [["a", "b", "c"], ["d", "e", "f"]], [1.1, 2.2, 3.3], [[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], [1, NA, 3], [[1, NA, 3], [4, 5, NA]], ["a", NA, "c"], [["a", NA, "c"], ["d", "e", NA]], [1.1, NA, 3.3], [[1.1, NA, 3.3], [4.4, 5.5, NA]], ], ) def test_list_scalar_device_construction(data): col = cudf.Series([data])._column slr = get_element(col, 0) assert slr.value == data @pytest.mark.parametrize("nesting_level", [1, 2, 3]) def test_list_scalar_device_construction_null(nesting_level): data = [[]] for i in range(nesting_level - 1): data = [data] arrow_type = pa.infer_type(data) arrow_arr = pa.array([None], type=arrow_type) col = cudf.Series(arrow_arr)._column slr = get_element(col, 0) assert slr.value is cudf.NA @pytest.mark.parametrize("input_obj", [[[1, NA, 3]], [[1, NA, 3], [4, 5, NA]]]) def test_construction_series_with_nulls(input_obj): expect = pa.array(input_obj, from_pandas=True) got = cudf.Series(input_obj).to_arrow() assert expect == got @pytest.mark.parametrize( "data", [ {"a": [[]]}, {"a": [[1, 2, None, 4]]}, {"a": [["cat", None, "dog"]]}, { "a": [[1, 2, 3, None], [4, None, 5]], "b": [None, ["fish", "bird"]], "c": [[], []], }, {"a": [[1, 2, 3, None], [4, None, 5], None, [6, 7]]}, ], ) def test_serialize_list_columns(data): df = cudf.DataFrame(data) recreated = df.__class__.deserialize(*df.serialize()) assert_eq(recreated, df) @pytest.mark.parametrize( "data,item", [ ( # basic list into a list column [[1, 2, 3], [4, 5, 6], [7, 8, 9]], [0, 0, 0], ), ( # nested list into nested list column [ [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], ], [[0, 0, 0], [0, 0, 0]], ), ( # NA into a list column [[1, 2, 3], [4, 5, 6], [7, 8, 9]], NA, ), ( # NA into nested list column [ [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], ], NA, ), ], ) def test_listcol_setitem(data, item): sr = cudf.Series(data) sr[1] = item data[1] = item expect = cudf.Series(data) assert_eq(expect, sr) @pytest.mark.parametrize( "data", [ [[1, 2, 3], [4, 5, 6], [7, 8, 9]], [ [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]], ], [[[1, 2, 3], [4, None, 6]], [], None, [[7, 8], [], None, [9]]], [[1, 2, 3], [4, None, 6], [7, 8], [], None, [9]], [[1.0, 2.0, 3.0], [4.0, None, 6.0], [7.0, 8.0], [], None, [9.0]], ], ) def test_listcol_as_string(data): got = cudf.Series(data).astype("str") expect = pd.Series(data).astype("str") assert_eq(expect, got) @pytest.mark.parametrize( "data,item,error", [ ( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[1, 2, 3], [4, 5, 6]], "list nesting level mismatch", ), ( [[1, 2, 3], [4, 5, 6], [7, 8, 9]], 0, "Can not set 0 into ListColumn", ), ], ) def test_listcol_setitem_error_cases(data, item, error): sr = cudf.Series(data) with pytest.raises(BaseException, match=error): sr[1] = item def test_listcol_setitem_retain_dtype(): df = cudf.DataFrame( {"a": cudf.Series([["a", "b"], []]), "b": [1, 2], "c": [123, 321]} ) df1 = df.head(0) # Performing a setitem on `b` 
triggers a `column.column_empty_like` call # which tries to create an empty ListColumn. df1["b"] = df1["c"] # Performing a copy to trigger a copy dtype which is obtained by accessing # `ListColumn.children` that would have been corrupted in previous call # prior to this fix: https://github.com/rapidsai/cudf/pull/10151/ df2 = df1.copy() assert df2["a"].dtype == df["a"].dtype def test_list_astype(): s = cudf.Series([[1, 2], [3, 4]]) s2 = s.list.astype("float64") assert s2.dtype == cudf.ListDtype("float64") assert_eq(s.list.leaves.astype("float64"), s2.list.leaves) s = cudf.Series([[[1, 2], [3]], [[5, 6], None]]) s2 = s.list.astype("string") assert s2.dtype == cudf.ListDtype(cudf.ListDtype("string")) assert_eq(s.list.leaves.astype("string"), s2.list.leaves) def test_memory_usage(): s1 = cudf.Series([[1, 2], [3, 4]]) assert s1.memory_usage() == 44 s2 = cudf.Series([[[[1, 2]]], [[[3, 4]]]]) assert s2.memory_usage() == 68 s3 = cudf.Series([[{"b": 1, "a": 10}, {"b": 2, "a": 100}]]) assert s3.memory_usage() == 40 @pytest.mark.parametrize( "data, idx", [ ( [[{"f2": {"a": 100}, "f1": "a"}, {"f1": "sf12", "f2": NA}]], 0, ), ( [ [ {"f2": {"a": 100, "c": 90, "f2": 10}, "f1": "a"}, {"f1": "sf12", "f2": NA}, ] ], 0, ), ( [[[[1, 2]], [[2], [3]]], [[[2]]], [[[3]]]], 0, ), ([[[[1, 2]], [[2], [3]]], [[[2]]], [[[3]]]], 2), ([[[{"a": 1, "b": 2, "c": 10}]]], 0), ], ) def test_nested_list_extract_host_scalars(data, idx): series = cudf.Series(data) assert series[idx] == data[idx] def test_list_iterate_error(): s = cudf.Series([[[[1, 2]], [[2], [3]]], [[[2]]], [[[3]]]]) with pytest.raises(TypeError): iter(s.list) def test_list_struct_list_memory_usage(): df = cudf.DataFrame({"a": [[{"b": [1]}]]}) assert df.memory_usage().sum() == 16
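

# Hedged usage sketch (an illustrative addition, not original): list
# accessor methods such as `contains` produce boolean Series that can be
# used as row masks.
def test_list_contains_filter_sketch():
    sr = cudf.Series([[1, 2], [3, 4], [2, 5]])
    mask = sr.list.contains(2)
    assert mask.to_arrow().to_pylist() == [True, False, True]
    # boolean masking keeps only the rows whose list contains the key
    assert len(sr[mask]) == 2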
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_serialize.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

import pickle

import msgpack
import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.core._compat import PANDAS_GE_200
from cudf.testing import _utils as utils
from cudf.testing._utils import assert_eq


@pytest.mark.parametrize(
    "df",
    [
        lambda: cudf.Index([1, 2, 3]),
        lambda: cudf.Index([1.0, 2.0, 3.0]),
        lambda: cudf.Series([1, 2, 3]),
        lambda: cudf.Series([1, 2, 3], index=[4, 5, 6]),
        lambda: cudf.Series([1, None, 3]),
        lambda: cudf.Series([1, 2, 3], index=[4, 5, None]),
        lambda: cudf.Series([1, 2, 3])[:2],
        lambda: cudf.Series([1, 2, 3])[:2]._column,
        lambda: cudf.Series(["a", "bb", "ccc"]),
        lambda: cudf.Series(["a", None, "ccc"]),
        lambda: cudf.Series(
            [
                {"a": ({"b": [1, 2, 3], "c": [4, 5, 6]}, {"d": [2, 4, 6]})},
                {"e": ({"b": [0, 2, 4], "c": [-1, -2, -3]}, {"d": [1, 1, 1]})},
            ]
        ),
        lambda: cudf.Series(
            [14.12302, 97938.2, np.nan, 0.0, -8.302014, np.nan, -112.2314]
        ).astype(cudf.Decimal64Dtype(7, 2)),
        lambda: cudf.DataFrame({"x": [1, 2, 3]}),
        lambda: cudf.DataFrame({"x": [1, 2, 3], "y": [1.0, None, 3.0]}),
        lambda: cudf.DataFrame(
            {"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]}, index=[1, None, 3]
        ),
        lambda: cudf.DataFrame(
            {"x": [1, 2, 3], "y": [1.0, None, 3.0]}, index=[1, None, 3]
        ),
        lambda: cudf.DataFrame(
            {"x": ["a", "bb", "ccc"], "y": [1.0, None, 3.0]},
            index=[1, None, 3],
        ),
        lambda: pd.Index([True, False] * 5),
        lambda: pd.CategoricalIndex(["a", "b", "a", "b"], ["a", "b", "c"]),
        lambda: (
            cudf.DataFrame(
                {
                    "a": [1, 2, 3],
                    "b": ["c", "e", "g"],
                    "d": [True, False, True],
                },
                index=cudf.MultiIndex.from_tuples(
                    [("i1", "i2"), ("i3", "i4"), ("i5", "i6")],
                    names=["foo", "bar"],
                ),
            )
        ),
        lambda: cudf.Index(
            cudf.date_range(start="2011-01-01", end="2012-01-01", periods=13)
        ),
        lambda: cudf.Index([1.2, 3.4, 5.6]),
        lambda: cudf.Series([1.2, 3.4, 5.6]),
        lambda: pd.IntervalIndex.from_breaks(range(10)),
        lambda: cudf.MultiIndex.from_tuples(
            [("i1", "i2"), ("i3", "i4"), ("i5", "i6")], names=["foo", "bar"]
        ),
        lambda: cudf.RangeIndex(10),
        lambda: cudf.DataFrame(
            {"a": list(range(13)), "b": [float(x) for x in range(13)]},
            index=cudf.Index(
                cudf.date_range(
                    start="2011-01-01", end="2012-01-01", periods=13
                )
            ),
        ),
        lambda: cudf.Series(
            list(range(13)),
            index=cudf.Index(
                cudf.date_range(
                    start="2011-01-01", end="2012-01-01", periods=13
                )
            ),
        ),
        lambda: cudf.TimedeltaIndex(
            [1132223, 2023232, 342234324, 4234324],
            dtype="timedelta64[ns]",
            name="foo",
        ),
        lambda: cudf.Index(
            [
                "y7ssMP1PWJ", "rZDLbzIQsX", "NrPwYMsxNw", "4zja1Vw9Rq",
                "Y9TNDhjXgR", "Ryjt7up2hT", "dxYKtRGHkb", "nMCWj5yhMu",
                "Rt7S362FNX", "OGbssOJLUI",
            ]
        ),
    ],
)
@pytest.mark.parametrize("to_host", [True, False])
def test_serialize(df, to_host):
    """This should hopefully replace all functions below"""
    a = df()
    if "cudf" not in type(a).__module__:
        a = cudf.from_pandas(a)
    if to_host:
        header, frames = a.host_serialize()
    else:
        header, frames = a.device_serialize()
    msgpack.dumps(header)  # ensure that header is msgpack serializable
    ndevice = 0
    for frame in frames:
        if hasattr(frame, "__cuda_array_interface__"):
            ndevice += 1
    # Indices etc. will not be DeviceNDArray
    # but data should be...
    if to_host:
        assert ndevice == 0
    elif hasattr(df, "_cols"):
        assert ndevice >= len(df._data)
    else:
        # If there are frames, something should be on the device
        assert ndevice > 0 or not frames

    typ = type(a)
    b = typ.deserialize(header, frames)
    assert_eq(a, b)


def test_serialize_dtype_error_checking():
    dtype = cudf.IntervalDtype("float", "right")
    header, frames = dtype.serialize()
    with pytest.raises(AssertionError):
        # Invalid number of frames
        type(dtype).deserialize(header, [None] * (header["frame_count"] + 1))
    with pytest.raises(AssertionError):
        # mismatching class
        cudf.StructDtype.deserialize(header, frames)


def test_serialize_dataframe():
    df = cudf.DataFrame()
    df["a"] = np.arange(100)
    df["b"] = np.arange(100, dtype=np.float32)
    df["c"] = pd.Categorical(
        ["a", "b", "c", "_", "_"] * 20, categories=["a", "b", "c"]
    )
    outdf = cudf.DataFrame.deserialize(*df.serialize())
    assert_eq(df, outdf)


def test_serialize_dataframe_with_index():
    df = cudf.DataFrame()
    df["a"] = np.arange(100)
    df["b"] = np.random.random(100)
    df["c"] = pd.Categorical(
        ["a", "b", "c", "_", "_"] * 20, categories=["a", "b", "c"]
    )
    df = df.sort_values("b")
    outdf = cudf.DataFrame.deserialize(*df.serialize())
    assert_eq(df, outdf)


def test_serialize_series():
    sr = cudf.Series(np.arange(100))
    outsr = cudf.Series.deserialize(*sr.serialize())
    assert_eq(sr, outsr)


def test_serialize_range_index():
    index = cudf.core.index.RangeIndex(10, 20)
    outindex = cudf.core.index.RangeIndex.deserialize(*index.serialize())
    assert_eq(index, outindex)


def test_serialize_generic_index():
    index = cudf.core.index.GenericIndex(cudf.Series(np.arange(10)))
    outindex = cudf.core.index.GenericIndex.deserialize(*index.serialize())
    assert_eq(index, outindex)


def test_serialize_multi_index():
    pdf = pd.DataFrame(
        {
            "a": [4, 17, 4, 9, 5],
            "b": [1, 4, 4, 3, 2],
            "x": np.random.normal(size=5),
        }
    )
    gdf = cudf.DataFrame.from_pandas(pdf)
    gdg = gdf.groupby(["a", "b"]).sum()
    multiindex = gdg.index
    outindex = cudf.core.multiindex.MultiIndex.deserialize(
        *multiindex.serialize()
    )
    assert_eq(multiindex, outindex)


def test_serialize_masked_series():
    nelem = 50
    data = np.random.random(nelem)
    mask = utils.random_bitmask(nelem)
    bitmask = utils.expand_bits_to_bytes(mask)[:nelem]
    null_count = utils.count_zero(bitmask)
    assert null_count >= 0
    sr = cudf.Series.from_masked_array(data, mask, null_count=null_count)
    outsr = cudf.Series.deserialize(*sr.serialize())
    assert_eq(sr, outsr)


def test_serialize_groupby_df():
    df = cudf.DataFrame()
    df["key_1"] = np.random.randint(0, 20, 100)
    df["key_2"] = np.random.randint(0, 20, 100)
    df["val"] = np.arange(100, dtype=np.float32)
    gb = df.groupby(["key_1", "key_2"], sort=True)
    outgb = gb.deserialize(*gb.serialize())
    expect = gb.mean()
    got = outgb.mean()
    assert_eq(got.sort_index(), expect.sort_index())


def test_serialize_groupby_external():
    df = cudf.DataFrame()
    df["val"] = np.arange(100, dtype=np.float32)
    gb = df.groupby(cudf.Series(np.random.randint(0, 20, 100)))
    outgb = gb.deserialize(*gb.serialize())
    expect = gb.mean()
    got = outgb.mean()
    assert_eq(got.sort_index(), expect.sort_index())


def test_serialize_groupby_level():
    idx = pd.MultiIndex.from_tuples([(1, 1), (1, 2), (2, 2)], names=("a", "b"))
    pdf = pd.DataFrame({"c": [1, 2, 3], "d": [2, 3, 4]}, index=idx)
    df = cudf.from_pandas(pdf)
    gb = df.groupby(level="a")
    expect = gb.mean()
    outgb = gb.deserialize(*gb.serialize())
    got = outgb.mean()
    assert_eq(expect.sort_index(), got.sort_index())


def test_serialize_groupby_sr():
    sr = cudf.Series(np.random.randint(0, 20, 100))
    gb = sr.groupby(sr // 2)
    outgb = gb.deserialize(*gb.serialize())
    expect = gb.mean()
    got = outgb.mean()
    assert_eq(got.sort_index(), expect.sort_index())


def test_serialize_datetime():
    # Make frame with datetime column
    df = pd.DataFrame(
        {"x": np.random.randint(0, 5, size=20), "y": np.random.normal(size=20)}
    )
    ts = np.arange(0, len(df), dtype=np.dtype("datetime64[ms]"))
    df["timestamp"] = ts
    gdf = cudf.DataFrame.from_pandas(df)
    # (De)serialize roundtrip
    recreated = cudf.DataFrame.deserialize(*gdf.serialize())
    # Check
    assert_eq(recreated, df)


def test_serialize_string():
    # Make frame with string column
    df = pd.DataFrame(
        {"x": np.random.randint(0, 5, size=5), "y": np.random.normal(size=5)}
    )
    str_data = ["a", "bc", "def", "ghij", "klmno"]
    df["timestamp"] = str_data
    gdf = cudf.DataFrame.from_pandas(df)
    # (De)serialize roundtrip
    recreated = cudf.DataFrame.deserialize(*gdf.serialize())
    # Check
    assert_eq(recreated, df)


@pytest.mark.parametrize(
    "frames",
    [
        (cudf.Series([], dtype="str"), pd.Series([], dtype="str")),
        pytest.param(
            (cudf.DataFrame([]), pd.DataFrame([])),
            marks=pytest.mark.xfail(
                not PANDAS_GE_200, reason=".column returns Index[object]"
            ),
        ),
        (cudf.DataFrame([1]).head(0), pd.DataFrame([1]).head(0)),
        (cudf.DataFrame({"a": []}), pd.DataFrame({"a": []})),
        (
            cudf.DataFrame({"a": ["a"]}).head(0),
            pd.DataFrame({"a": ["a"]}).head(0),
        ),
        (
            cudf.DataFrame({"a": [1.0]}).head(0),
            pd.DataFrame({"a": [1.0]}).head(0),
        ),
    ],
)
def test_serialize_empty(frames):
    gdf, pdf = frames
    typ = type(gdf)
    res = typ.deserialize(*gdf.serialize())
    assert_eq(res, gdf)


def test_serialize_all_null_string():
    data = [None, None, None, None, None]
    pd_series = pd.Series(data, dtype="str")
    gd_series = cudf.Series(data, dtype="str")

    recreated = cudf.Series.deserialize(*gd_series.serialize())
    assert_eq(recreated, pd_series)


def test_serialize_named_series():
    gdf = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [5, 1, 2, 5]})
    ser = gdf["b"]
    recreated = cudf.Series.deserialize(*ser.serialize())
    assert_eq(recreated, ser)


def test_serialize_seriesgroupby():
    gdf = cudf.DataFrame({"a": [1, 2, 3, 4], "b": [5, 1, 2, 5]})
    gb = gdf.groupby(["a"]).b
    recreated = gb.__class__.deserialize(*gb.serialize())
    assert_eq(recreated.sum(), gb.sum())


def test_serialize_seriesresampler():
    index = cudf.date_range(start="2001-01-01", periods=10, freq="1T")
    sr = cudf.Series(range(10), index=index)
    re_sampler = sr.resample("3T")
    expected = re_sampler.sum()
    recreated = re_sampler.__class__.deserialize(*re_sampler.serialize())
    actual = recreated.sum()
    assert_eq(expected, actual)


def test_serialize_string_check_buffer_sizes():
    df = cudf.DataFrame({"a": ["a", "b", "cd", None]})
    expect = df.memory_usage(deep=True).loc["a"]
    header, frames = df.serialize()
    got = sum(b.nbytes for b in frames)
    assert expect == got


def test_deserialize_cudf_0_16(datadir):
    fname = datadir / "pkl" / "stringColumnWithRangeIndex_cudf_0.16.pkl"

    expected = cudf.DataFrame({"a": ["hi", "hello", "world", None]})
    with open(fname, "rb") as f:
        actual = pickle.load(f)
    assert_eq(expected, actual)


def test_serialize_sliced_string():
    # https://github.com/rapidsai/cudf/issues/7735
    data = ["hi", "hello", None]
    pd_series = pd.Series(data, dtype=pd.StringDtype())
    gd_series = cudf.Series(data, dtype="str")
    sliced = gd_series[0:3]
    serialized_gd_series = gd_series.serialize()
    serialized_sliced = sliced.serialize()

    # validate frames are equal or not
    # because both should be identical
    for i in range(3):
        assert_eq(
            serialized_gd_series[1][i].memoryview(),
            serialized_sliced[1][i].memoryview(),
        )

    recreated = cudf.Series.deserialize(*sliced.serialize())
    assert_eq(recreated.to_pandas(nullable=True), pd_series)
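

# Minimal sketch (an assumption, for illustration only): pickling goes
# through the same host-serialization path, so any object covered above
# should also survive a pickle round trip.
def test_pickle_roundtrip_sketch():
    sr = cudf.Series([1, None, 3])
    assert_eq(sr, pickle.loads(pickle.dumps(sr)))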
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_udf_binops.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION.

import numpy as np
import pytest
from numba.cuda import compile_ptx
from numba.np import numpy_support

import rmm

import cudf
from cudf import Series, _lib as libcudf
from cudf.utils import dtypes as dtypeutils

_driver_version = rmm._cuda.gpu.driverGetVersion()
_runtime_version = rmm._cuda.gpu.runtimeGetVersion()
_CUDA_JIT128INT_SUPPORTED = (_driver_version >= 11050) and (
    _runtime_version >= 11050
)


@pytest.mark.skipif(not _CUDA_JIT128INT_SUPPORTED, reason="requires CUDA 11.5")
@pytest.mark.parametrize(
    "dtype", sorted(list(dtypeutils.NUMERIC_TYPES - {"int8"}))
)
def test_generic_ptx(dtype):
    size = 500

    lhs_arr = np.random.random(size).astype(dtype)
    lhs_col = Series(lhs_arr)._column

    rhs_arr = np.random.random(size).astype(dtype)
    rhs_col = Series(rhs_arr)._column

    def generic_function(a, b):
        return a**3 + b

    nb_type = numpy_support.from_dtype(cudf.dtype(dtype))
    type_signature = (nb_type, nb_type)

    ptx_code, output_type = compile_ptx(
        generic_function, type_signature, device=True
    )

    dtype = numpy_support.as_dtype(output_type).type

    out_col = libcudf.binaryop.binaryop_udf(lhs_col, rhs_col, ptx_code, dtype)

    result = lhs_arr**3 + rhs_arr

    np.testing.assert_almost_equal(result, out_col.values_host)
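

# Hedged sketch (not in the original file): compile_ptx can be probed
# directly to confirm the return type numba infers for a signature, which is
# the same mechanism test_generic_ptx relies on.
def test_compile_ptx_return_type_sketch():
    def add(a, b):
        return a + b

    f32 = numpy_support.from_dtype(np.dtype("float32"))
    ptx, resty = compile_ptx(add, (f32, f32), device=True)
    # float32 + float32 resolves to float32 under numba's typing rules
    assert numpy_support.as_dtype(resty) == np.dtype("float32")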
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_multiindex.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION. """ Test related to MultiIndex """ import itertools import operator import pickle import re from contextlib import contextmanager from io import BytesIO import cupy as cp import numpy as np import pandas as pd import pytest import cudf from cudf.api.extensions import no_default from cudf.core._compat import PANDAS_GE_200 from cudf.core.column import as_column from cudf.core.index import as_index from cudf.testing._utils import ( assert_eq, assert_exceptions_equal, assert_neq, expect_warning_if, ) @contextmanager def expect_pandas_performance_warning(idx): with expect_warning_if( (not isinstance(idx[0], tuple) and len(idx) > 2) or (isinstance(idx[0], tuple) and len(idx[0]) > 2), pd.errors.PerformanceWarning, ): yield def test_multiindex_levels_codes_validation(): levels = [["a", "b"], ["c", "d"]] # Codes not a sequence of sequences assert_exceptions_equal( lfunc=pd.MultiIndex, rfunc=cudf.MultiIndex, lfunc_args_and_kwargs=([levels, [0, 1]],), rfunc_args_and_kwargs=([levels, [0, 1]],), ) # Codes don't match levels assert_exceptions_equal( lfunc=pd.MultiIndex, rfunc=cudf.MultiIndex, lfunc_args_and_kwargs=([levels, [[0], [1], [1]]],), rfunc_args_and_kwargs=([levels, [[0], [1], [1]]],), ) # Largest code greater than number of levels assert_exceptions_equal( lfunc=pd.MultiIndex, rfunc=cudf.MultiIndex, lfunc_args_and_kwargs=([levels, [[0, 1], [0, 2]]],), rfunc_args_and_kwargs=([levels, [[0, 1], [0, 2]]],), ) # Unequal code lengths assert_exceptions_equal( lfunc=pd.MultiIndex, rfunc=cudf.MultiIndex, lfunc_args_and_kwargs=([levels, [[0, 1], [0]]],), rfunc_args_and_kwargs=([levels, [[0, 1], [0]]],), ) # Didn't pass levels and codes assert_exceptions_equal(lfunc=pd.MultiIndex, rfunc=cudf.MultiIndex) # Didn't pass non zero levels and codes assert_exceptions_equal( lfunc=pd.MultiIndex, rfunc=cudf.MultiIndex, lfunc_args_and_kwargs=([[], []],), rfunc_args_and_kwargs=([[], []],), ) def test_multiindex_construction(): levels = [["a", "b"], ["c", "d"]] codes = [[0, 1], [1, 0]] pmi = pd.MultiIndex(levels, codes) mi = cudf.MultiIndex(levels, codes) assert_eq(pmi, mi) pmi = pd.MultiIndex(levels, codes) mi = cudf.MultiIndex(levels=levels, codes=codes) assert_eq(pmi, mi) def test_multiindex_types(): codes = [[0, 1], [1, 0]] levels = [[0, 1], [2, 3]] pmi = pd.MultiIndex(levels, codes) mi = cudf.MultiIndex(levels, codes) assert_eq(pmi, mi) levels = [[1.2, 2.1], [1.3, 3.1]] pmi = pd.MultiIndex(levels, codes) mi = cudf.MultiIndex(levels, codes) assert_eq(pmi, mi) levels = [["a", "b"], ["c", "d"]] pmi = pd.MultiIndex(levels, codes) mi = cudf.MultiIndex(levels, codes) assert_eq(pmi, mi) def test_multiindex_df_assignment(): pdf = pd.DataFrame({"x": [1, 2, 3]}) gdf = cudf.from_pandas(pdf) pdf.index = pd.MultiIndex([["a", "b"], ["c", "d"]], [[0, 1, 0], [1, 0, 1]]) gdf.index = cudf.MultiIndex( levels=[["a", "b"], ["c", "d"]], codes=[[0, 1, 0], [1, 0, 1]] ) assert_eq(pdf, gdf) def test_multiindex_series_assignment(): ps = pd.Series([1, 2, 3]) gs = cudf.from_pandas(ps) ps.index = pd.MultiIndex([["a", "b"], ["c", "d"]], [[0, 1, 0], [1, 0, 1]]) gs.index = cudf.MultiIndex( levels=[["a", "b"], ["c", "d"]], codes=[[0, 1, 0], [1, 0, 1]] ) assert_eq(ps, gs) def test_multiindex_swaplevel(): midx = cudf.MultiIndex( levels=[ ["lama", "cow", "falcon"], ["speed", "weight", "length"], ["first", "second"], ], codes=[ [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2], [0, 0, 0, 0, 0, 0, 1, 1, 1], ], names=["Col1", "Col2", "Col3"], ) pd_midx = midx.to_pandas() 
assert_eq(pd_midx.swaplevel(-1, -2), midx.swaplevel(-1, -2)) assert_eq(pd_midx.swaplevel(2, 1), midx.swaplevel(2, 1)) assert_eq(midx.swaplevel(2, 1), midx.swaplevel(1, 2)) assert_eq(pd_midx.swaplevel(0, 2), midx.swaplevel(0, 2)) assert_eq(pd_midx.swaplevel(2, 0), midx.swaplevel(2, 0)) assert_eq(midx.swaplevel(1, 1), midx.swaplevel(1, 1)) def test_string_index(): from cudf.core.index import Index pdf = pd.DataFrame(np.random.rand(5, 5)) gdf = cudf.from_pandas(pdf) stringIndex = ["a", "b", "c", "d", "e"] pdf.index = stringIndex gdf.index = stringIndex assert_eq(pdf, gdf) stringIndex = np.array(["a", "b", "c", "d", "e"]) pdf.index = stringIndex gdf.index = stringIndex assert_eq(pdf, gdf) stringIndex = Index(["a", "b", "c", "d", "e"], name="name") pdf.index = stringIndex.to_pandas() gdf.index = stringIndex assert_eq(pdf, gdf) stringIndex = as_index(as_column(["a", "b", "c", "d", "e"]), name="name") pdf.index = stringIndex.to_pandas() gdf.index = stringIndex assert_eq(pdf, gdf) def test_multiindex_row_shape(): pdf = pd.DataFrame(np.random.rand(0, 5)) gdf = cudf.from_pandas(pdf) pdfIndex = pd.MultiIndex([["a", "b", "c"]], [[0]]) pdfIndex.names = ["alpha"] gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex, gdfIndex) assert_exceptions_equal( lfunc=operator.setitem, rfunc=operator.setitem, lfunc_args_and_kwargs=([], {"a": pdf, "b": "index", "c": pdfIndex}), rfunc_args_and_kwargs=([], {"a": gdf, "b": "index", "c": gdfIndex}), ) @pytest.fixture def pdf(): return pd.DataFrame(np.random.rand(7, 5)) @pytest.fixture def gdf(pdf): return cudf.from_pandas(pdf) @pytest.fixture def pdfIndex(): pdfIndex = pd.MultiIndex( [ ["a", "b", "c"], ["house", "store", "forest"], ["clouds", "clear", "storm"], ["fire", "smoke", "clear"], [ np.datetime64("2001-01-01", "ns"), np.datetime64("2002-01-01", "ns"), np.datetime64("2003-01-01", "ns"), ], ], [ [0, 0, 0, 0, 1, 1, 2], [1, 1, 1, 1, 0, 0, 2], [0, 0, 2, 2, 2, 0, 1], [0, 0, 0, 1, 2, 0, 1], [1, 0, 1, 2, 0, 0, 1], ], ) pdfIndex.names = ["alpha", "location", "weather", "sign", "timestamp"] return pdfIndex @pytest.fixture def pdfIndexNulls(): pdfIndex = pd.MultiIndex( [ ["a", "b", "c"], ["house", "store", "forest"], ["clouds", "clear", "storm"], ], [ [0, 0, 0, -1, 1, 1, 2], [1, -1, 1, 1, 0, 0, -1], [-1, 0, 2, 2, 2, 0, 1], ], ) pdfIndex.names = ["alpha", "location", "weather"] return pdfIndex def test_from_pandas(pdf, pdfIndex): pdf.index = pdfIndex gdf = cudf.from_pandas(pdf) assert_eq(pdf, gdf) def test_multiindex_transpose(pdf, pdfIndex): pdf.index = pdfIndex gdf = cudf.from_pandas(pdf) assert_eq(pdf.transpose(), gdf.transpose()) def test_from_pandas_series(): pdf = pd.DataFrame( {"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} ).set_index(["a", "b"]) result = cudf.from_pandas(pdf) assert_eq(pdf, result) test_pdf = pdf["c"] result = cudf.from_pandas(test_pdf) assert_eq(test_pdf, result) def test_series_multiindex(pdfIndex): ps = pd.Series(np.random.rand(7)) gs = cudf.from_pandas(ps) ps.index = pdfIndex gs.index = cudf.from_pandas(pdfIndex) assert_eq(ps, gs) def test_multiindex_take(pdf, gdf, pdfIndex): gdfIndex = cudf.from_pandas(pdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex assert_eq(pdf.index.take([0]), gdf.index.take([0])) assert_eq(pdf.index.take(np.array([0])), gdf.index.take(np.array([0]))) from cudf import Series assert_eq(pdf.index.take(pd.Series([0])), gdf.index.take(Series([0]))) assert_eq(pdf.index.take([0, 1]), gdf.index.take([0, 1])) assert_eq( pdf.index.take(np.array([0, 1])), gdf.index.take(np.array([0, 1])) ) assert_eq( 
pdf.index.take(pd.Series([0, 1])), gdf.index.take(Series([0, 1])) ) def test_multiindex_getitem(pdf, gdf, pdfIndex): gdfIndex = cudf.from_pandas(pdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex assert_eq(pdf.index[0], gdf.index[0]) @pytest.mark.parametrize( "key_tuple", [ # return 2 rows, 0 remaining keys = dataframe with entire index ("a", "store", "clouds", "fire"), (("a", "store", "clouds", "fire"), slice(None)), # return 2 rows, 1 remaining key = dataframe with n-k index columns ("a", "store", "storm"), (("a", "store", "storm"), slice(None)), # return 2 rows, 2 remaining keys = dataframe with n-k index columns ("a", "store"), (("a", "store"), slice(None)), # return 2 rows, n-1 remaining keys = dataframe with n-k index columns ("a",), "a", "b", "c", (("a",), slice(None)), # return 1 row, 0 remaining keys = dataframe with entire index ("a", "store", "storm", "smoke"), (("a", "store", "storm", "smoke"), slice(None)), # return 1 row and 1 remaining key = series ("c", "forest", "clear"), (("c", "forest", "clear"), slice(None)), ], ) def test_multiindex_loc(pdf, gdf, pdfIndex, key_tuple): gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex, gdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex # The index is unsorted, which makes things slow but is fine for testing. with expect_pandas_performance_warning(key_tuple): expected = pdf.loc[key_tuple] got = gdf.loc[key_tuple].sort_index() assert_eq(expected.sort_index(), got) with cudf.option_context("mode.pandas_compatible", True): got = gdf.loc[key_tuple] assert_eq(expected, got) @pytest.mark.parametrize( "indexer", [ (([1, 1], [0, 1]), slice(None)), (([1, 1], [1, 0]), slice(None)), ], ) def test_multiindex_compatible_ordering(indexer): df = pd.DataFrame( {"a": [1, 1, 2, 3], "b": [1, 0, 1, 1], "c": [1, 2, 3, 4]} ).set_index(["a", "b"]) cdf = cudf.from_pandas(df) expect = df.loc[indexer] with cudf.option_context("mode.pandas_compatible", True): actual = cdf.loc[indexer] assert_eq(actual, expect) @pytest.mark.parametrize( "arg", [ slice(("a", "store"), ("b", "house")), slice(None, ("b", "house")), slice(("a", "store"), None), slice(None), ], ) def test_multiindex_loc_slice(pdf, gdf, pdfIndex, arg): gdf = cudf.from_pandas(pdf) gdfIndex = cudf.from_pandas(pdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex assert_eq(pdf.loc[arg], gdf.loc[arg]) def test_multiindex_loc_errors(pdf, gdf, pdfIndex): gdf = cudf.from_pandas(pdf) gdfIndex = cudf.from_pandas(pdfIndex) gdf.index = gdfIndex with pytest.raises(KeyError): gdf.loc[("a", "store", "clouds", "foo")] with pytest.raises(IndexError): gdf.loc[ ("a", "store", "clouds", "fire", "x", "y") ] # too many indexers with pytest.raises(IndexError): gdf.loc[slice(None, ("a", "store", "clouds", "fire", "x", "y"))] def test_multiindex_loc_then_column(pdf, gdf, pdfIndex): gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex, gdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex # The index is unsorted, which makes things slow but is fine for testing. 
    with pytest.warns(pd.errors.PerformanceWarning):
        expected = pdf.loc[("a", "store", "clouds", "fire"), :][0]
    got = gdf.loc[("a", "store", "clouds", "fire"), :][0]
    assert_eq(expected, got)


def test_multiindex_loc_rows_0(pdf, gdf, pdfIndex):
    gdfIndex = cudf.from_pandas(pdfIndex)
    pdf.index = pdfIndex
    gdf.index = gdfIndex

    assert_exceptions_equal(
        lfunc=pdf.loc.__getitem__,
        rfunc=gdf.loc.__getitem__,
        lfunc_args_and_kwargs=([(("d",), slice(None, None, None))],),
        rfunc_args_and_kwargs=([(("d",), slice(None, None, None))],),
    )


def test_multiindex_loc_rows_1_2_key(pdf, gdf, pdfIndex):
    gdfIndex = cudf.from_pandas(pdfIndex)
    pdf.index = pdfIndex
    gdf.index = gdfIndex
    assert_eq(pdf.loc[("c", "forest"), :], gdf.loc[("c", "forest"), :])


def test_multiindex_loc_rows_1_1_key(pdf, gdf, pdfIndex):
    gdfIndex = cudf.from_pandas(pdfIndex)
    pdf.index = pdfIndex
    gdf.index = gdfIndex
    assert_eq(pdf.loc[("c",), :], gdf.loc[("c",), :])


def test_multiindex_column_shape():
    pdf = pd.DataFrame(np.random.rand(5, 0))
    gdf = cudf.from_pandas(pdf)
    pdfIndex = pd.MultiIndex([["a", "b", "c"]], [[0]])
    pdfIndex.names = ["alpha"]
    gdfIndex = cudf.from_pandas(pdfIndex)
    assert_eq(pdfIndex, gdfIndex)

    assert_exceptions_equal(
        lfunc=operator.setitem,
        rfunc=operator.setitem,
        lfunc_args_and_kwargs=([], {"a": pdf, "b": "columns", "c": pdfIndex}),
        rfunc_args_and_kwargs=([], {"a": gdf, "b": "columns", "c": gdfIndex}),
    )


@pytest.mark.parametrize(
    "query",
    [
        ("a", "store", "clouds", "fire"),
        ("a", "store", "storm", "smoke"),
        ("a", "store"),
        ("b", "house"),
        ("a", "store", "storm"),
        ("a",),
        ("c", "forest", "clear"),
    ],
)
def test_multiindex_columns(pdf, gdf, pdfIndex, query):
    pdf = pdf.T
    gdf = cudf.from_pandas(pdf)
    gdfIndex = cudf.from_pandas(pdfIndex)
    assert_eq(pdfIndex, gdfIndex)
    pdf.columns = pdfIndex
    gdf.columns = gdfIndex
    # The index is unsorted, which makes things slow but is fine for testing.
with expect_pandas_performance_warning(query): expected = pdf[query] got = gdf[query] assert_eq(expected, got) def test_multiindex_from_tuples(): arrays = [["a", "a", "b", "b"], ["house", "store", "house", "store"]] tuples = list(zip(*arrays)) pmi = pd.MultiIndex.from_tuples(tuples) gmi = cudf.MultiIndex.from_tuples(tuples) assert_eq(pmi, gmi) def test_multiindex_from_dataframe(): if not hasattr(pd.MultiIndex([[]], [[]]), "codes"): pytest.skip() pdf = pd.DataFrame( [["a", "house"], ["a", "store"], ["b", "house"], ["b", "store"]] ) gdf = cudf.from_pandas(pdf) pmi = pd.MultiIndex.from_frame(pdf, names=["alpha", "location"]) gmi = cudf.MultiIndex.from_frame(gdf, names=["alpha", "location"]) assert_eq(pmi, gmi) @pytest.mark.parametrize( "arrays", [ [["a", "a", "b", "b"], ["house", "store", "house", "store"]], [["a", "n", "n"] * 1000, ["house", "store", "house", "store"]], [ ["a", "n", "n"], ["house", "store", "house", "store", "store"] * 1000, ], [ ["a", "a", "n"] * 50, ["house", "store", "house", "store", "store"] * 100, ], ], ) def test_multiindex_from_product(arrays): pmi = pd.MultiIndex.from_product(arrays, names=["alpha", "location"]) gmi = cudf.MultiIndex.from_product(arrays, names=["alpha", "location"]) assert_eq(pmi, gmi) def test_multiindex_index_and_columns(): gdf = cudf.DataFrame() gdf["x"] = np.random.randint(0, 5, 5) gdf["y"] = np.random.randint(0, 5, 5) pdf = gdf.to_pandas() mi = cudf.MultiIndex( levels=[[0, 1, 2], [3, 4]], codes=[[0, 0, 1, 1, 2], [0, 1, 0, 1, 1]], names=["x", "y"], ) gdf.index = mi mc = cudf.MultiIndex( levels=[["val"], ["mean", "min"]], codes=[[0, 0], [0, 1]] ) gdf.columns = mc pdf.index = mi.to_pandas() pdf.columns = mc.to_pandas() assert_eq(pdf, gdf) def test_multiindex_multiple_groupby(): pdf = pd.DataFrame( { "a": [4, 17, 4, 9, 5], "b": [1, 4, 4, 3, 2], "x": np.random.normal(size=5), } ) gdf = cudf.DataFrame.from_pandas(pdf) pdg = pdf.groupby(["a", "b"], sort=True).sum() gdg = gdf.groupby(["a", "b"], sort=True).sum() assert_eq(pdg, gdg) pdg = pdf.groupby(["a", "b"], sort=True).x.sum() gdg = gdf.groupby(["a", "b"], sort=True).x.sum() assert_eq(pdg, gdg) @pytest.mark.parametrize( "func", [ lambda df: df.groupby(["x", "y"], sort=True).z.sum(), lambda df: df.groupby(["x", "y"], sort=True).sum(), ], ) def test_multi_column(func): pdf = pd.DataFrame( { "x": np.random.randint(0, 5, size=1000), "y": np.random.randint(0, 10, size=1000), "z": np.random.normal(size=1000), } ) gdf = cudf.DataFrame.from_pandas(pdf) a = func(pdf) b = func(gdf) assert_eq(a, b) def test_multiindex_equality(): # mi made from groupby # mi made manually to be identical # are they equal? gdf = cudf.DataFrame( {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [0, 1, 0, 1, 0]} ) mi1 = gdf.groupby(["x", "y"], sort=True).mean().index mi2 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) assert_eq(mi1, mi2) # mi made from two groupbys, are they equal? mi2 = gdf.groupby(["x", "y"], sort=True).max().index assert_eq(mi1, mi2) # mi made manually twice are they equal? mi1 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) mi2 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) assert_eq(mi1, mi2) # mi made from different groupbys are they not equal? mi1 = gdf.groupby(["x", "y"]).mean().index mi2 = gdf.groupby(["x", "z"]).mean().index assert_neq(mi1, mi2) # mi made from different manuals are they not equal? 
mi1 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) mi2 = cudf.MultiIndex( levels=[[0, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) assert_neq(mi1, mi2) def test_multiindex_equals(): # mi made from groupby # mi made manually to be identical # are they equal? gdf = cudf.DataFrame( {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [0, 1, 0, 1, 0]} ) mi1 = gdf.groupby(["x", "y"], sort=True).mean().index mi2 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) assert_eq(mi1.equals(mi2), True) # mi made from two groupbys, are they equal? mi2 = gdf.groupby(["x", "y"], sort=True).max().index assert_eq(mi1.equals(mi2), True) # mi made manually twice are they equal? mi1 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) mi2 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) assert_eq(mi1.equals(mi2), True) # mi made from different groupbys are they not equal? mi1 = gdf.groupby(["x", "y"], sort=True).mean().index mi2 = gdf.groupby(["x", "z"], sort=True).mean().index assert_eq(mi1.equals(mi2), False) # mi made from different manuals are they not equal? mi1 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) mi2 = cudf.MultiIndex( levels=[[0, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) assert_eq(mi1.equals(mi2), False) @pytest.mark.parametrize( "data", [ { "Date": [ "2020-08-27", "2020-08-28", "2020-08-31", "2020-08-27", "2020-08-28", "2020-08-31", "2020-08-27", "2020-08-28", "2020-08-31", ], "Close": [ 3400.00, 3401.80, 3450.96, 226.58, 228.91, 225.53, 505.13, 525.91, 534.98, ], "Symbol": [ "AMZN", "AMZN", "AMZN", "MSFT", "MSFT", "MSFT", "NVDA", "NVDA", "NVDA", ], } ], ) @pytest.mark.parametrize( "levels", [[["2000-01-01", "2000-01-02", "2000-01-03"], ["A", "B", "C"]], None], ) @pytest.mark.parametrize( "codes", [[[0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0]], None] ) @pytest.mark.parametrize("names", [["X", "Y"]]) def test_multiindex_copy_sem(data, levels, codes, names): """Test semantic equality for MultiIndex.copy""" gdf = cudf.DataFrame(data) pdf = gdf.to_pandas() gdf = gdf.groupby(["Date", "Symbol"], sort=True).mean() pdf = pdf.groupby(["Date", "Symbol"], sort=True).mean() gmi = gdf.index with expect_warning_if(levels is not None or codes is not None): gmi_copy = gmi.copy(levels=levels, codes=codes, names=names) pmi = pdf.index with expect_warning_if(levels is not None or codes is not None): pmi_copy = pmi.copy(levels=levels, codes=codes, names=names) for glv, plv in zip(gmi_copy.levels, pmi_copy.levels): assert all(glv.values_host == plv.values) for gval, pval in zip(gmi.codes, pmi.codes): assert_eq(gval, pval) assert_eq(gmi_copy.names, pmi_copy.names) # Test same behavior when used on DataFrame gdf.index = gmi_copy pdf.index = pmi_copy assert repr(gdf) == repr(pdf) @pytest.mark.parametrize( "data", [ { "Date": [ "2020-08-27", "2020-08-28", "2020-08-31", "2020-08-27", "2020-08-28", "2020-08-31", "2020-08-27", "2020-08-28", "2020-08-31", ], "Close": [ 3400.00, 3401.80, 3450.96, 226.58, 228.91, 225.53, 505.13, 525.91, 534.98, ], "Symbol": [ "AMZN", "AMZN", "AMZN", "MSFT", "MSFT", "MSFT", "NVDA", "NVDA", "NVDA", ], }, cudf.MultiIndex( levels=[[1001, 1002], [2001, 2002]], codes=[[1, 1, 0, 0], 
[0, 1, 0, 1]], names=["col1", "col2"], ), ], ) @pytest.mark.parametrize("copy_on_write", [True, False]) @pytest.mark.parametrize("deep", [True, False]) def test_multiindex_copy_deep(data, copy_on_write, deep): """Test memory identity for deep copy Case1: Constructed from GroupBy, StringColumns Case2: Constructed from MultiIndex, NumericColumns """ original_cow_setting = cudf.get_option("copy_on_write") cudf.set_option("copy_on_write", copy_on_write) if isinstance(data, dict): import operator from functools import reduce gdf = cudf.DataFrame(data) mi1 = gdf.groupby(["Date", "Symbol"]).mean().index mi2 = mi1.copy(deep=deep) lchildren = [col.children for _, col in mi1._data.items()] rchildren = [col.children for _, col in mi2._data.items()] # Flatten lchildren = reduce(operator.add, lchildren) rchildren = reduce(operator.add, rchildren) lptrs = [child.base_data.get_ptr(mode="read") for child in lchildren] rptrs = [child.base_data.get_ptr(mode="read") for child in rchildren] assert all((x == y) for x, y in zip(lptrs, rptrs)) elif isinstance(data, cudf.MultiIndex): same_ref = (not deep) or ( cudf.get_option("copy_on_write") and not deep ) mi1 = data mi2 = mi1.copy(deep=deep) # Assert ._levels identity lptrs = [ lv._data._data[None].base_data.get_ptr(mode="read") for lv in mi1._levels ] rptrs = [ lv._data._data[None].base_data.get_ptr(mode="read") for lv in mi2._levels ] assert all((x == y) == same_ref for x, y in zip(lptrs, rptrs)) # Assert ._codes identity lptrs = [ c.base_data.get_ptr(mode="read") for _, c in mi1._codes._data.items() ] rptrs = [ c.base_data.get_ptr(mode="read") for _, c in mi2._codes._data.items() ] assert all((x == y) == same_ref for x, y in zip(lptrs, rptrs)) # Assert ._data identity lptrs = [ d.base_data.get_ptr(mode="read") for _, d in mi1._data.items() ] rptrs = [ d.base_data.get_ptr(mode="read") for _, d in mi2._data.items() ] assert all((x == y) == same_ref for x, y in zip(lptrs, rptrs)) cudf.set_option("copy_on_write", original_cow_setting) @pytest.mark.parametrize( "iloc_rows", [ 0, 1, slice(None, 0), slice(None, 1), slice(0, 1), slice(1, 2), slice(0, 2), slice(0, None), slice(1, None), ], ) @pytest.mark.parametrize( "iloc_columns", [ 0, 1, slice(None, 0), slice(None, 1), slice(0, 1), slice(1, 2), slice(0, 2), slice(0, None), slice(1, None), ], ) def test_multiindex_iloc(pdf, gdf, pdfIndex, iloc_rows, iloc_columns): gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex, gdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex presult = pdf.iloc[iloc_rows, iloc_columns] gresult = gdf.iloc[iloc_rows, iloc_columns] if isinstance(gresult, cudf.DataFrame): assert_eq( presult, gresult, check_index_type=False, check_column_type=False ) else: assert_eq(presult, gresult, check_index_type=False, check_dtype=False) def test_multiindex_iloc_scalar(): arrays = [["a", "a", "b", "b"], [1, 2, 3, 4]] tuples = list(zip(*arrays)) idx = cudf.MultiIndex.from_tuples(tuples) gdf = cudf.DataFrame( {"first": cp.random.rand(4), "second": cp.random.rand(4)} ) gdf.index = idx pdf = gdf.to_pandas() assert_eq(pdf.iloc[3], gdf.iloc[3]) @pytest.mark.parametrize( "iloc_rows", [ 0, 1, slice(None, 0), slice(None, 1), slice(0, 1), slice(1, 2), slice(0, 2), slice(0, None), slice(1, None), ], ) @pytest.mark.parametrize( "iloc_columns", [ 0, 1, slice(None, 0), slice(None, 1), slice(0, 1), slice(1, 2), slice(0, 2), slice(0, None), slice(1, None), ], ) def test_multicolumn_iloc(pdf, gdf, pdfIndex, iloc_rows, iloc_columns): gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex, gdfIndex) pdf.index = 
pdfIndex gdf.index = gdfIndex pdf = pdf.T gdf = gdf.T presult = pdf.iloc[iloc_rows, iloc_columns] gresult = gdf.iloc[iloc_rows, iloc_columns] if hasattr(gresult, "name") and isinstance(gresult.name, tuple): name = gresult.name[len(gresult.name) - 1] if isinstance(name, str) and "cudf" in name: gresult.name = name if isinstance(presult, pd.DataFrame): assert_eq( presult, gresult, check_index_type=False, check_column_type=False ) else: assert_eq(presult, gresult, check_index_type=False, check_dtype=False) def test_multicolumn_item(): gdf = cudf.DataFrame( {"x": np.arange(10), "y": np.arange(10), "z": np.arange(10)} ) gdg = gdf.groupby(["x", "y"]).min() gdgT = gdg.T pdgT = gdgT.to_pandas() assert_eq(gdgT[(0, 0)], pdgT[(0, 0)]) def test_multiindex_to_frame(pdfIndex, pdfIndexNulls): gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex.to_frame(), gdfIndex.to_frame()) gdfIndex = cudf.from_pandas(pdfIndexNulls) assert_eq( pdfIndexNulls.to_frame().fillna("nan"), gdfIndex.to_frame().fillna("nan"), ) def test_multiindex_groupby_to_frame(): gdf = cudf.DataFrame( {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [0, 1, 0, 1, 0]} ) pdf = gdf.to_pandas() gdg = gdf.groupby(["x", "y"], sort=True).count() pdg = pdf.groupby(["x", "y"], sort=True).count() assert_eq(pdg.index.to_frame(), gdg.index.to_frame()) def test_multiindex_reset_index(pdf, gdf, pdfIndex): gdfIndex = cudf.from_pandas(pdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex assert_eq(pdf.reset_index(), gdf.reset_index()) def test_multiindex_groupby_reset_index(): gdf = cudf.DataFrame( {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [0, 1, 0, 1, 0]} ) pdf = gdf.to_pandas() gdg = gdf.groupby(["x", "y"], sort=True).sum() pdg = pdf.groupby(["x", "y"], sort=True).sum() assert_eq(pdg.reset_index(), gdg.reset_index()) def test_multicolumn_reset_index(): gdf = cudf.DataFrame({"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5]}) pdf = gdf.to_pandas() gdg = gdf.groupby(["x"], sort=True).agg({"y": ["count", "mean"]}) pdg = pdf.groupby(["x"], sort=True).agg({"y": ["count", "mean"]}) assert_eq(pdg.reset_index(), gdg.reset_index(), check_dtype=False) gdg = gdf.groupby(["x"], sort=True).agg({"y": ["count"]}) pdg = pdf.groupby(["x"], sort=True).agg({"y": ["count"]}) assert_eq(pdg.reset_index(), gdg.reset_index(), check_dtype=False) gdg = gdf.groupby(["x"], sort=True).agg({"y": "count"}) pdg = pdf.groupby(["x"], sort=True).agg({"y": "count"}) assert_eq(pdg.reset_index(), gdg.reset_index(), check_dtype=False) def test_multiindex_multicolumn_reset_index(): gdf = cudf.DataFrame( {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [1, 2, 3, 4, 5]} ) pdf = gdf.to_pandas() gdg = gdf.groupby(["x", "y"], sort=True).agg({"y": ["count", "mean"]}) pdg = pdf.groupby(["x", "y"], sort=True).agg({"y": ["count", "mean"]}) assert_eq(pdg.reset_index(), gdg.reset_index(), check_dtype=False) gdg = gdf.groupby(["x", "z"], sort=True).agg({"y": ["count", "mean"]}) pdg = pdf.groupby(["x", "z"], sort=True).agg({"y": ["count", "mean"]}) assert_eq(pdg.reset_index(), gdg.reset_index(), check_dtype=False) def test_groupby_multiindex_columns_from_pandas(pdf, gdf, pdfIndex): gdfIndex = cudf.from_pandas(pdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex assert_eq(gdf, pdf) assert_eq(gdf.T, pdf.T) def test_multiindex_rows_with_wildcard(pdf, gdf, pdfIndex): gdfIndex = cudf.from_pandas(pdfIndex) pdf.index = pdfIndex gdf.index = gdfIndex # The index is unsorted, which makes things slow but is fine for testing. 
with pytest.warns(pd.errors.PerformanceWarning): assert_eq( pdf.loc[("a",), :].sort_index(), gdf.loc[("a",), :].sort_index() ) assert_eq( pdf.loc[(("a"), ("store")), :].sort_index(), gdf.loc[(("a"), ("store")), :].sort_index(), ) assert_eq( pdf.loc[(("a"), ("store"), ("storm")), :].sort_index(), gdf.loc[(("a"), ("store"), ("storm")), :].sort_index(), ) assert_eq( pdf.loc[(("a"), ("store"), ("storm"), ("smoke")), :].sort_index(), gdf.loc[(("a"), ("store"), ("storm"), ("smoke")), :].sort_index(), ) assert_eq( pdf.loc[(slice(None), "store"), :].sort_index(), gdf.loc[(slice(None), "store"), :].sort_index(), ) assert_eq( pdf.loc[(slice(None), slice(None), "storm"), :].sort_index(), gdf.loc[(slice(None), slice(None), "storm"), :].sort_index(), ) assert_eq( pdf.loc[ (slice(None), slice(None), slice(None), "smoke"), : ].sort_index(), gdf.loc[ (slice(None), slice(None), slice(None), "smoke"), : ].sort_index(), ) def test_multiindex_multicolumn_zero_row_slice(): gdf = cudf.DataFrame( {"x": [1, 5, 3, 4, 1], "y": [1, 1, 2, 2, 5], "z": [1, 2, 3, 4, 5]} ) pdf = gdf.to_pandas() gdg = gdf.groupby(["x", "y"]).agg({"z": ["count"]}).iloc[:0] pdg = pdf.groupby(["x", "y"]).agg({"z": ["count"]}).iloc[:0] assert_eq(pdg, gdg, check_dtype=False) def test_multicolumn_loc(pdf, pdfIndex): pdf = pdf.T pdf.columns = pdfIndex gdf = cudf.from_pandas(pdf) assert_eq(pdf.loc[:, "a"], gdf.loc[:, "a"]) assert_eq(pdf.loc[:, ("a", "store")], gdf.loc[:, ("a", "store")]) assert_eq(pdf.loc[:, "a":"b"], gdf.loc[:, "a":"b"]) assert_eq(pdf.loc[:, ["a", "b"]], gdf.loc[:, ["a", "b"]]) @pytest.mark.xfail( reason="https://github.com/pandas-dev/pandas/issues/43351", ) def test_multicolumn_set_item(pdf, pdfIndex): pdf = pdf.T pdf.columns = pdfIndex gdf = cudf.from_pandas(pdf) pdf["d"] = [1, 2, 3, 4, 5] gdf["d"] = [1, 2, 3, 4, 5] assert_eq(pdf, gdf) def test_multiindex_iter_error(): midx = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) with pytest.raises( TypeError, match=re.escape( f"{midx.__class__.__name__} object is not iterable. " f"Consider using `.to_arrow()`, `.to_pandas()` or `.values_host` " f"if you wish to iterate over the values." 
), ): iter(midx) def test_multiindex_values(): midx = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) result = midx.values assert isinstance(result, cp.ndarray) np.testing.assert_array_equal( result.get(), np.array([[1, 1], [1, 5], [3, 2], [4, 2], [5, 1]]) ) def test_multiindex_values_host(): midx = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) pmidx = midx.to_pandas() assert_eq(midx.values_host, pmidx.values) def test_multiindex_to_numpy(): midx = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) pmidx = midx.to_pandas() assert_eq(midx.to_numpy(), pmidx.to_numpy()) @pytest.mark.parametrize( "gdi, fill_value, expected", [ ( cudf.MultiIndex( levels=[[1, 3, 4, None], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), 5, cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), ), ( cudf.MultiIndex( levels=[[1, 3, 4, None], [1, None, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), 100, cudf.MultiIndex( levels=[[1, 3, 4, 100], [1, 100, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), ), ( cudf.MultiIndex( levels=[["a", "b", "c", None], ["1", None, "5"]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), "100", cudf.MultiIndex( levels=[["a", "b", "c", "100"], ["1", "100", "5"]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), ), ], ) def test_multiindex_fillna(gdi, fill_value, expected): assert_eq(expected, gdi.fillna(fill_value)) @pytest.mark.parametrize( "pdi", [ pd.MultiIndex( levels=[[], [], []], codes=[[], [], []], names=["one", "two", "three"], ), pd.MultiIndex.from_tuples( list( zip( *[ [ "bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux", ], [ "one", "two", "one", "two", "one", "two", "one", "two", ], ] ) ) ), ], ) def test_multiindex_empty(pdi): gdi = cudf.from_pandas(pdi) assert_eq(pdi.empty, gdi.empty) @pytest.mark.parametrize( "pdi", [ pd.MultiIndex( levels=[[], [], []], codes=[[], [], []], names=["one", "two", "three"], ), pd.MultiIndex.from_tuples( list( zip( *[ [ "bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux", ], [ "one", "two", "one", "two", "one", "two", "one", "two", ], ] ) ) ), ], ) def test_multiindex_size(pdi): gdi = cudf.from_pandas(pdi) assert_eq(pdi.size, gdi.size) @pytest.mark.parametrize( "level", [ [], "alpha", "location", "weather", 0, 1, [0, 1], -1, [-1, -2], [-1, "weather"], ], ) def test_multiindex_droplevel_simple(pdfIndex, level): gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex.droplevel(level), gdfIndex.droplevel(level)) @pytest.mark.parametrize( "level", itertools.chain( *( itertools.combinations( ("alpha", "location", "weather", "sign", "timestamp"), r ) for r in range(5) ) ), ) def test_multiindex_droplevel_name(pdfIndex, level): level = list(level) gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex.droplevel(level), gdfIndex.droplevel(level)) @pytest.mark.parametrize( "level", itertools.chain(*(itertools.combinations(range(5), r) for r in range(5))), ) def test_multiindex_droplevel_index(pdfIndex, level): level = list(level) gdfIndex = cudf.from_pandas(pdfIndex) assert_eq(pdfIndex.droplevel(level), gdfIndex.droplevel(level)) @pytest.mark.parametrize("ascending", [True, False]) @pytest.mark.parametrize("return_indexer", [True, False]) @pytest.mark.parametrize( "pmidx", [ pd.MultiIndex( 
levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), pd.MultiIndex.from_product( [["bar", "baz", "foo", "qux"], ["one", "two"]], names=["first", "second"], ), pd.MultiIndex( levels=[[], [], []], codes=[[], [], []], names=["one", "two", "three"], ), pd.MultiIndex.from_tuples( list( zip( *[ [ "bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux", ], [ "one", "two", "one", "two", "one", "two", "one", "two", ], ] ) ) ), ], ) def test_multiindex_sort_values(pmidx, ascending, return_indexer): pmidx = pmidx midx = cudf.from_pandas(pmidx) expected = pmidx.sort_values( ascending=ascending, return_indexer=return_indexer ) actual = midx.sort_values( ascending=ascending, return_indexer=return_indexer ) if return_indexer: expected_indexer = expected[1] actual_indexer = actual[1] assert_eq(expected_indexer, actual_indexer) expected = expected[0] actual = actual[0] assert_eq(expected, actual) @pytest.mark.parametrize( "pdi", [ pd.MultiIndex( levels=[[1, 3.0, 4, 5], [1, 2.3, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), pd.MultiIndex( levels=[[1, 3, 4, -10], [1, 11, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), pd.MultiIndex( levels=[["a", "b", "c", "100"], ["1", "100", "5"]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), pytest.param( pd.MultiIndex( levels=[[None, "b", "c", "a"], ["1", None, "5"]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ), marks=[ pytest.mark.xfail( reason="https://github.com/pandas-dev/pandas/issues/35584" ) ], ), ], ) @pytest.mark.parametrize("ascending", [True, False]) def test_multiindex_argsort(pdi, ascending): gdi = cudf.from_pandas(pdi) if not ascending: expected = pdi.argsort()[::-1] else: expected = pdi.argsort() actual = gdi.argsort(ascending=ascending) assert_eq(expected, actual) @pytest.mark.parametrize( "idx", [pd.MultiIndex.from_product([["python", "cobra"], [2018, 2019]])] ) @pytest.mark.parametrize( "names", [[None, None], ["a", None], ["new name", "another name"]] ) @pytest.mark.parametrize("inplace", [True, False]) def test_multiindex_set_names(idx, names, inplace): pi = idx.copy() gi = cudf.from_pandas(idx) expected = pi.set_names(names=names, inplace=inplace) actual = gi.set_names(names=names, inplace=inplace) if inplace: expected, actual = pi, gi assert_eq(expected, actual) @pytest.mark.parametrize( "idx", [ pd.MultiIndex.from_product( [["python", "cobra"], [2018, 2019], ["aab", "bcd"]] ), pd.MultiIndex.from_product( [["python", "cobra"], [2018, 2019], ["aab", "bcd"]], names=[1, 0, 2], ), ], ) @pytest.mark.parametrize( "level, names", [ (0, "abc"), (1, "xyz"), ([2, 1], ["a", "b"]), ([0, 1], ["aa", "bb"]), (None, ["a", "b", "c"]), (None, ["a", None, "c"]), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_multiindex_set_names_default_and_int_names( idx, level, names, inplace ): pi = idx.copy() gi = cudf.from_pandas(idx) expected = pi.set_names(names=names, level=level, inplace=inplace) actual = gi.set_names(names=names, level=level, inplace=inplace) if inplace: expected, actual = pi, gi assert_eq(expected, actual) @pytest.mark.parametrize( "idx", [ pd.MultiIndex.from_product( [["python", "cobra"], [2018, 2019], ["aab", "bcd"]], names=["one", None, "three"], ), ], ) @pytest.mark.parametrize( "level, names", [ ([None], "abc"), (["three", "one"], ["a", "b"]), (["three", 1], ["a", "b"]), ([0, "three", 1], ["a", "b", "z"]), (["one", 1, "three"], ["a", "b", "z"]), (["one", None, "three"], ["a", "b", "z"]), ([2, 1], ["a", "b"]), (1, 
"xyz"), ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_multiindex_set_names_string_names(idx, level, names, inplace): pi = idx.copy() gi = cudf.from_pandas(idx) expected = pi.set_names(names=names, level=level, inplace=inplace) actual = gi.set_names(names=names, level=level, inplace=inplace) if inplace: expected, actual = pi, gi assert_eq(expected, actual) @pytest.mark.parametrize( "level, names", [(1, ["a"]), (None, "a"), ([1, 2], ["a"]), (None, ["a"])] ) def test_multiindex_set_names_error(level, names): pi = pd.MultiIndex.from_product( [["python", "cobra"], [2018, 2019], ["aab", "bcd"]] ) gi = cudf.from_pandas(pi) assert_exceptions_equal( lfunc=pi.set_names, rfunc=gi.set_names, lfunc_args_and_kwargs=([], {"names": names, "level": level}), rfunc_args_and_kwargs=([], {"names": names, "level": level}), ) @pytest.mark.parametrize( "idx", [ pd.MultiIndex.from_product([["python", "cobra"], [2018, 2019]]), pd.MultiIndex.from_product( [["python", "cobra"], [2018, 2019]], names=["old name", None] ), ], ) @pytest.mark.parametrize( "names", [ [None, None], ["a", None], ["new name", "another name"], [1, None], [2, 3], [42, "name"], ], ) @pytest.mark.parametrize("inplace", [True, False]) def test_multiindex_rename(idx, names, inplace): pi = idx.copy() gi = cudf.from_pandas(idx) expected = pi.rename(names=names, inplace=inplace) actual = gi.rename(names=names, inplace=inplace) if inplace: expected, actual = pi, gi assert_eq(expected, actual) @pytest.mark.parametrize( "names", ["plain string", 123, ["str"], ["l1", "l2", "l3"]] ) def test_multiindex_rename_error(names): pi = pd.MultiIndex.from_product([["python", "cobra"], [2018, 2019]]) gi = cudf.from_pandas(pi) assert_exceptions_equal( lfunc=pi.rename, rfunc=gi.rename, lfunc_args_and_kwargs=([], {"names": names}), rfunc_args_and_kwargs=([], {"names": names}), ) @pytest.mark.parametrize( "key", [0, 1, [], [0, 1], slice(None), slice(0, 0), slice(0, 1), slice(0, 2)], ) def test_multiindex_indexing(key): gi = cudf.MultiIndex.from_frame( cudf.DataFrame({"a": [1, 2, 3], "b": [True, False, False]}) ) pi = gi.to_pandas() assert_eq(gi[key], pi[key], exact=False) def test_multiindex_duplicate_names(): gi = cudf.MultiIndex( levels=[["a", "b"], ["b", "a"]], codes=[[0, 0], [0, 1]], names=["a", "a"], ) pi = pd.MultiIndex( levels=[["a", "b"], ["b", "a"]], codes=[[0, 0], [0, 1]], names=["a", "a"], ) assert_eq(gi, pi) def test_difference(): midx = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3], [0, 2, 1, 1, 0]], names=["x", "y"], ) midx2 = cudf.MultiIndex( levels=[[1, 3, 4, 5], [1, 2, 5]], codes=[[0, 0, 1, 2, 3, 3], [0, 2, 1, 1, 0, 2]], names=["x", "y"], ) expected = midx2.to_pandas().difference(midx.to_pandas()) actual = midx2.difference(midx) assert isinstance(actual, cudf.MultiIndex) assert_eq(expected, actual) @pytest.mark.parametrize( "idx1, idx2", [ ( pd.MultiIndex.from_arrays( [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ), pd.MultiIndex.from_arrays( [[3, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ), ), ( pd.MultiIndex.from_arrays( [[1, 2, 3, 4], ["Red", "Blue", "Red", "Blue"]], names=["a", "b"], ), pd.MultiIndex.from_arrays( [[3, 3, 2, 4], ["Red", "Green", "Red", "Green"]], names=["x", "y"], ), ), ( pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], names=["a", "b", "c"], ), pd.MultiIndex.from_arrays( [[3, 3, 2, 4], [0.2, 0.4, 1.4, 10], [3, 3, 2, 4]] ), ), ( pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], names=["a", "b", "c"], ), [(2, 6, 12)], ), ], ) 
@pytest.mark.parametrize("sort", [None, False]) def test_union_mulitIndex(idx1, idx2, sort): expected = idx1.union(idx2, sort=sort) idx1 = cudf.from_pandas(idx1) if isinstance(idx1, pd.MultiIndex) else idx1 idx2 = cudf.from_pandas(idx2) if isinstance(idx2, pd.MultiIndex) else idx2 actual = idx1.union(idx2, sort=sort) assert_eq(expected, actual) @pytest.mark.parametrize( "idx1, idx2", [ ( pd.MultiIndex.from_arrays( [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ), pd.MultiIndex.from_arrays( [[1, 3, 2, 2], ["Red", "Green", "Red", "Green"]] ), ), ( pd.MultiIndex.from_arrays( [[1, 2, 3, 4], ["Red", "Blue", "Red", "Blue"]], names=["a", "b"], ), pd.MultiIndex.from_arrays( [[3, 3, 2, 4], ["Red", "Green", "Red", "Green"]], names=["x", "y"], ), ), ( pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], names=["a", "b", "c"], ), pd.MultiIndex.from_arrays( [[3, 3, 2, 4], [0.2, 0.4, 1.4, 10], [3, 3, 2, 4]] ), ), ( pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], names=["a", "b", "c"], ), pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], ), ), ], ) @pytest.mark.parametrize("sort", [None, False]) def test_intersection_mulitIndex(idx1, idx2, sort): expected = idx1.intersection(idx2, sort=sort) idx1 = cudf.from_pandas(idx1) idx2 = cudf.from_pandas(idx2) actual = idx1.intersection(idx2, sort=sort) assert_eq(expected, actual, exact=False) @pytest.mark.parametrize( "names", [ ["a", "b", "c"], [None, None, None], ["aa", "aa", "aa"], ["bb", "aa", "aa"], None, ], ) def test_pickle_roundtrip_multiindex(names): df = cudf.DataFrame( { "one": [1, 2, 3], "two": [True, False, True], "three": ["ab", "cd", "ef"], "four": [0.2, 0.1, -10.2], } ) expected_df = df.set_index(["one", "two", "three"]) expected_df.index.names = names local_file = BytesIO() pickle.dump(expected_df, local_file) local_file.seek(0) actual_df = pickle.load(local_file) assert_eq(expected_df, actual_df) @pytest.mark.parametrize( "pidx", [ pd.MultiIndex.from_arrays( [[1, 1, 2, 2], ["Red", "Blue", "Red", "Blue"]] ), pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], names=["a", "b", "c"], ), pd.MultiIndex.from_arrays( [[1.0, 2, 3, 4], [5, 6, 7.8, 10], [11, 12, 12, 13]], ), ], ) @pytest.mark.parametrize( "func", [ "is_numeric", "is_boolean", "is_integer", "is_floating", "is_object", "is_categorical", "is_interval", ], ) def test_multiindex_type_methods(pidx, func): gidx = cudf.from_pandas(pidx) if PANDAS_GE_200: with pytest.warns(FutureWarning): expected = getattr(pidx, func)() else: expected = getattr(pidx, func)() with pytest.warns(FutureWarning): actual = getattr(gidx, func)() if func == "is_object": assert_eq(False, actual) else: assert_eq(expected, actual) def test_multiindex_index_single_row(): arrays = [["a", "a", "b", "b"], [1, 2, 3, 4]] tuples = list(zip(*arrays)) idx = cudf.MultiIndex.from_tuples(tuples) gdf = cudf.DataFrame( {"first": cp.random.rand(4), "second": cp.random.rand(4)} ) gdf.index = idx pdf = gdf.to_pandas() assert_eq(pdf.loc[("b", 3)], gdf.loc[("b", 3)]) def test_multiindex_levels(): gidx = cudf.MultiIndex.from_product( [range(3), ["one", "two"]], names=["first", "second"] ) pidx = gidx.to_pandas() assert_eq(gidx.levels[0], pidx.levels[0]) assert_eq(gidx.levels[1], pidx.levels[1]) def test_multiindex_empty_slice_pandas_compatibility(): expected = pd.MultiIndex.from_tuples([("a", "b")])[:0] with cudf.option_context("mode.pandas_compatible", True): actual = cudf.from_pandas(expected) assert_eq(expected, actual, exact=False) 
@pytest.mark.parametrize( "levels", itertools.chain.from_iterable( itertools.permutations(range(3), n) for n in range(1, 4) ), ids=str, ) def test_multiindex_sort_index_partial(levels): df = pd.DataFrame( { "a": [3, 3, 3, 1, 1, 1, 2, 2], "b": [4, 2, 7, -1, 11, -2, 7, 7], "c": [4, 4, 2, 3, 3, 3, 1, 1], "val": [1, 2, 3, 4, 5, 6, 7, 8], } ).set_index(["a", "b", "c"]) cdf = cudf.from_pandas(df) expect = df.sort_index(level=levels, sort_remaining=True) got = cdf.sort_index(level=levels, sort_remaining=True) assert_eq(expect, got) def test_multiindex_to_series_error(): midx = cudf.MultiIndex.from_tuples([("a", "b")]) with pytest.raises(NotImplementedError): midx.to_series() @pytest.mark.parametrize( "pidx", [ pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], names=["a", "b", "c"], ), pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], names=["a", "a", "a"], ), pd.MultiIndex.from_arrays( [[1, 2, 3, 4], [5, 6, 7, 10], [11, 12, 12, 13]], ), ], ) @pytest.mark.parametrize( "name", [None, no_default, ["x", "y", "z"], ["rapids", "rapids", "rapids"]] ) @pytest.mark.parametrize("allow_duplicates", [True, False]) @pytest.mark.parametrize("index", [True, False]) def test_multiindex_to_frame_allow_duplicates( pidx, name, allow_duplicates, index ): gidx = cudf.from_pandas(pidx) if ( ( len(pidx.names) != len(set(pidx.names)) and not all(x is None for x in pidx.names) ) and not allow_duplicates and (name is None or name is no_default) ): assert_exceptions_equal( pidx.to_frame, gidx.to_frame, lfunc_args_and_kwargs=( [], { "index": index, "name": name, "allow_duplicates": allow_duplicates, }, ), rfunc_args_and_kwargs=( [], { "index": index, "name": name, "allow_duplicates": allow_duplicates, }, ), ) else: if ( len(pidx.names) != len(set(pidx.names)) and not all(x is None for x in pidx.names) and not isinstance(name, list) ) or (isinstance(name, list) and len(name) != len(set(name))): # cudf doesn't have the ability to construct dataframes # with duplicate column names with expect_warning_if(name is None): with pytest.raises(ValueError): gidx.to_frame( index=index, name=name, allow_duplicates=allow_duplicates, ) else: with expect_warning_if(name is None): expected = pidx.to_frame( index=index, name=name, allow_duplicates=allow_duplicates ) with expect_warning_if(name is None): actual = gidx.to_frame( index=index, name=name, allow_duplicates=allow_duplicates ) assert_eq(expected, actual) @pytest.mark.parametrize("bad", ["foo", ["foo"]]) def test_multiindex_set_names_validation(bad): mi = cudf.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0), (1, 1)]) with pytest.raises(ValueError): mi.names = bad def test_multiindex_values_pandas_compatible(): midx = cudf.MultiIndex.from_tuples([(10, 12), (8, 9), (3, 4)]) with cudf.option_context("mode.pandas_compatible", True): with pytest.raises(NotImplementedError): midx.values def test_multiindex_dtype_error(): midx = cudf.MultiIndex.from_tuples([(10, 12), (8, 9), (3, 4)]) with pytest.raises(TypeError): cudf.Index(midx, dtype="int64") with pytest.raises(TypeError): cudf.Index(midx.to_pandas(), dtype="int64") def test_multiindex_codes(): midx = cudf.MultiIndex.from_tuples( [("a", "b"), ("a", "c"), ("b", "c")], names=["A", "Z"] ) for p_array, g_array in zip(midx.to_pandas().codes, midx.codes): assert_eq(p_array, g_array) def test_multiindex_union_error(): midx = cudf.MultiIndex.from_tuples([(10, 12), (8, 9), (3, 4)]) pidx = midx.to_pandas() assert_exceptions_equal( midx.union, pidx.union, lfunc_args_and_kwargs=(["a"],), 
        rfunc_args_and_kwargs=(["a"],),
    )


@pytest.mark.parametrize("idx_get", [(0, 0), (0, 1), (1, 0), (1, 1)])
@pytest.mark.parametrize("cols_get", [0, 1, [0, 1], [1, 0], [1], [0]])
def test_multiindex_loc_scalar(idx_get, cols_get):
    idx = cudf.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0), (1, 1)])
    df = cudf.DataFrame({0: range(4), 1: range(10, 50, 10)}, index=idx)
    pdf = df.to_pandas()

    actual = df.loc[idx_get, cols_get]
    expected = pdf.loc[idx_get, cols_get]

    assert_eq(actual, expected)


def test_multiindex_eq_other_multiindex():
    idx = cudf.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0), (1, 1)])
    result = idx == idx
    expected = np.array([True, True])
    assert_eq(result, expected)
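
# Illustrative sketch (an addition, not from the original suite):
# MultiIndex.from_tuples mirrors the pandas constructor, so building
# from the same tuples and round-tripping through to_pandas should
# preserve both the labels and the level names.
def test_multiindex_from_tuples_roundtrip_sketch():
    tuples = [(0, 0), (0, 1), (1, 0), (1, 1)]
    gidx = cudf.MultiIndex.from_tuples(tuples, names=["a", "b"])
    pidx = pd.MultiIndex.from_tuples(tuples, names=["a", "b"])
    assert_eq(gidx, pidx)
    assert_eq(gidx.to_pandas(), pidx)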
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_pickling.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import pickle import numpy as np import pandas as pd import pytest from cudf import DataFrame, GenericIndex, RangeIndex, Series from cudf.core.buffer import as_buffer from cudf.testing._utils import assert_eq pytestmark = pytest.mark.spilling def check_serialization(df): # basic assert_frame_picklable(df) # sliced assert_frame_picklable(df[:-1]) assert_frame_picklable(df[1:]) assert_frame_picklable(df[2:-2]) # sorted sortvaldf = df.sort_values("vals") assert isinstance(sortvaldf.index, (GenericIndex, RangeIndex)) assert_frame_picklable(sortvaldf) # out-of-band buffers = [] serialbytes = pickle.dumps(df, protocol=5, buffer_callback=buffers.append) for b in buffers: assert isinstance(b, pickle.PickleBuffer) loaded = pickle.loads(serialbytes, buffers=buffers) assert_eq(loaded, df) def assert_frame_picklable(df): serialbytes = pickle.dumps(df) loaded = pickle.loads(serialbytes) assert_eq(loaded, df) def test_pickle_dataframe_numeric(): np.random.seed(0) df = DataFrame() nelem = 10 df["keys"] = np.arange(nelem, dtype=np.float64) df["vals"] = np.random.random(nelem) check_serialization(df) def test_pickle_dataframe_categorical(): np.random.seed(0) df = DataFrame() df["keys"] = pd.Categorical( ["a", "a", "a", "b", "a", "b", "a", "b", "a", "c"] ) df["vals"] = np.random.random(len(df)) check_serialization(df) def test_memory_usage_dataframe(): np.random.seed(0) df = DataFrame() nelem = 1000 df["keys"] = hkeys = np.arange(nelem, dtype=np.float64) df["vals"] = hvals = np.random.random(nelem) nbytes = hkeys.nbytes + hvals.nbytes sizeof = df.memory_usage().sum() assert sizeof >= nbytes serialized_nbytes = len(pickle.dumps(df, protocol=pickle.HIGHEST_PROTOCOL)) # assert at least sizeof bytes were serialized assert serialized_nbytes >= sizeof def test_pickle_index(): nelem = 10 idx = GenericIndex(np.arange(nelem), name="a") pickled = pickle.dumps(idx) out = pickle.loads(pickled) assert (idx == out).all() def test_pickle_buffer(): arr = np.arange(10).view("|u1") buf = as_buffer(arr) assert buf.size == arr.nbytes pickled = pickle.dumps(buf) unpacked = pickle.loads(pickled) # Check that unpacked capacity equals buf.size assert unpacked.size == arr.nbytes @pytest.mark.parametrize("named", [True, False]) def test_pickle_series(named): np.random.seed(0) if named: ser = Series(np.random.random(10), name="a") else: ser = Series(np.random.random(10)) pickled = pickle.dumps(ser) out = pickle.loads(pickled) assert (ser == out).all() @pytest.mark.parametrize( "slices", [ slice(None, None, None), slice(1, 3, 1), slice(0, 3, 1), slice(3, 5, 1), slice(10, 12, 1), ], ) def test_pickle_categorical_column(slices): sr = Series(["a", "b", None, "a", "c", "b"]).astype("category") sliced_sr = sr.iloc[slices] input_col = sliced_sr._column pickled = pickle.dumps(input_col) out = pickle.loads(pickled) assert_eq(Series(out), Series(input_col)) @pytest.mark.parametrize( "slices", [ slice(None, None, None), slice(1, 3, 1), slice(0, 3, 1), slice(3, 5, 1), slice(10, 12, 1), ], ) def test_pickle_string_column(slices): sr = Series(["a", "b", None, "a", "c", "b"]) sliced_sr = sr.iloc[slices] input_col = sliced_sr._column pickled = pickle.dumps(input_col) out = pickle.loads(pickled) assert_eq(Series(out), Series(input_col))
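
# Illustrative sketch (an addition, not part of the original module):
# pickle protocol 5 ships large buffers out of band, so the in-band
# payload stays small while buffer_callback collects the actual
# device-backed data; reloading requires handing the same buffers back.
def test_pickle_out_of_band_series_sketch():
    sr = Series(np.arange(100, dtype=np.float64))
    buffers = []
    payload = pickle.dumps(sr, protocol=5, buffer_callback=buffers.append)
    # Anything collected out of band should arrive as PickleBuffer views.
    assert all(isinstance(b, pickle.PickleBuffer) for b in buffers)
    out = pickle.loads(payload, buffers=buffers)
    assert_eq(out, sr)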
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_hdfs.py
# Copyright (c) 2020-2022, NVIDIA CORPORATION.

import os
from io import BytesIO

import fastavro
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest

import cudf
from cudf.testing._utils import assert_eq

if not os.environ.get("RUN_HDFS_TESTS"):
    pytestmark = pytest.mark.skip("Env not configured to run HDFS tests")


basedir = "/tmp/test-hdfs"
host = "localhost"  # hadoop hostname
port = 9000  # hadoop rpc port


@pytest.fixture
def hdfs():
    # Default Rpc port can be 8020/9000 depending on the hdfs config
    fs = pa.hdfs.connect(host=host, port=port)
    try:
        if not fs.exists(basedir):
            fs.mkdir(basedir)
    except pa.lib.ArrowIOError:
        pytest.skip("hdfs config probably incorrect")

    return fs


@pytest.fixture
def pdf():
    df = pd.DataFrame()
    df["Integer"] = np.array([2345, 11987, 9027, 9027])
    df["Float"] = np.array([9.001, 8.343, 6, 2.781])
    df["Integer2"] = np.array([2345, 106, 2088, 789277], dtype="uint64")
    df["String"] = np.array(["Alpha", "Beta", "Gamma", "Delta"])
    df["Boolean"] = np.array([True, False, True, False])
    return df


@pytest.mark.parametrize("test_url", [False, True])
def test_read_csv(tmpdir, pdf, hdfs, test_url):
    fname = tmpdir.mkdir("csv").join("file.csv")
    # Write to local file system
    pdf.to_csv(fname)
    # Read from local file system as buffer
    with open(fname, mode="rb") as f:
        buffer = BytesIO(f.read())
    # Write to hdfs
    hdfs.upload(basedir + "/test_csv_reader.csv", buffer)

    if test_url:
        hd_fpath = "hdfs://{}:{}{}/test_csv_reader.csv".format(
            host, port, basedir
        )
    else:
        hd_fpath = f"hdfs://{basedir}/test_csv_reader.csv"

    got = cudf.read_csv(hd_fpath)

    # Read pandas from byte buffer
    with hdfs.open(basedir + "/test_csv_reader.csv") as f:
        expect = pd.read_csv(f)

    assert_eq(expect, got)


@pytest.mark.parametrize("test_url", [False, True])
def test_write_csv(pdf, hdfs, test_url):
    gdf = cudf.from_pandas(pdf)
    if test_url:
        hd_fpath = "hdfs://{}:{}{}/test_csv_writer.csv".format(
            host, port, basedir
        )
    else:
        hd_fpath = f"hdfs://{basedir}/test_csv_writer.csv"

    gdf.to_csv(hd_fpath, index=False)

    assert hdfs.exists(f"{basedir}/test_csv_writer.csv")
    with hdfs.open(f"{basedir}/test_csv_writer.csv", mode="rb") as f:
        got = pd.read_csv(f, dtype=dict(pdf.dtypes))

    assert_eq(pdf, got)


@pytest.mark.parametrize("test_url", [False, True])
def test_read_parquet(tmpdir, pdf, hdfs, test_url):
    fname = tmpdir.mkdir("parquet").join("test_parquet_reader.parquet")
    # Write to local file system
    pdf.to_parquet(fname)
    # Read from local file system as buffer
    with open(fname, mode="rb") as f:
        buffer = BytesIO(f.read())
    # Write to hdfs
    hdfs.upload(basedir + "/test_parquet_reader.parquet", buffer)

    if test_url:
        hd_fpath = "hdfs://{}:{}{}/test_parquet_reader.parquet".format(
            host, port, basedir
        )
    else:
        hd_fpath = f"hdfs://{basedir}/test_parquet_reader.parquet"

    got = cudf.read_parquet(hd_fpath)

    # Read pandas from byte buffer
    with hdfs.open(basedir + "/test_parquet_reader.parquet") as f:
        expect = pd.read_parquet(f)

    assert_eq(expect, got)


@pytest.mark.parametrize("test_url", [False, True])
def test_write_parquet(pdf, hdfs, test_url):
    gdf = cudf.from_pandas(pdf)
    if test_url:
        hd_fpath = "hdfs://{}:{}{}/test_parquet_writer.parquet".format(
            host, port, basedir
        )
    else:
        hd_fpath = f"hdfs://{basedir}/test_parquet_writer.parquet"

    gdf.to_parquet(hd_fpath)

    assert hdfs.exists(f"{basedir}/test_parquet_writer.parquet")
    with hdfs.open(f"{basedir}/test_parquet_writer.parquet", mode="rb") as f:
        got = pd.read_parquet(f)

    assert_eq(pdf, got)


@pytest.mark.xfail(
    reason="Writing string columns with partition_cols is incorrect"
)
@pytest.mark.parametrize("test_url", [False, True]) def test_write_parquet_partitioned(tmpdir, pdf, hdfs, test_url): pdf.to_parquet( path=tmpdir.join("pandas_parquet_writer_partitioned.parquet"), index=False, partition_cols=["Integer", "Boolean"], ) gdf = cudf.from_pandas(pdf) if test_url: hd_fpath = "hdfs://{}:{}{}/test_parquet_partitioned.parquet".format( host, port, basedir ) else: hd_fpath = f"hdfs://{basedir}/test_parquet_partitioned.parquet" # Clear data written from previous runs hdfs.rm(f"{basedir}/test_parquet_partitioned.parquet", recursive=True) gdf.to_parquet( hd_fpath, index=False, partition_cols=["Integer", "Boolean"] ) assert hdfs.exists(f"{basedir}/test_parquet_partitioned.parquet") got = pd.read_parquet(hd_fpath) expect = pd.read_parquet( tmpdir.join("pandas_parquet_writer_partitioned.parquet") ) assert_eq(expect, got) @pytest.mark.parametrize("test_url", [False, True]) def test_read_json(tmpdir, pdf, hdfs, test_url): fname = tmpdir.mkdir("json").join("test_json_reader.json") # Write to local file system # Sorting by col_name now as pandas sorts by col name while reading json pdf.sort_index(axis=1).to_json(fname, orient="records", lines=True) # Read from local file system as buffer with open(fname, mode="rb") as f: buffer = BytesIO(f.read()) # Write to hdfs hdfs.upload(basedir + "/test_json_reader.json", buffer) if test_url: hd_fpath = "hdfs://{}:{}{}/test_json_reader.json".format( host, port, basedir ) else: hd_fpath = f"hdfs://{basedir}/test_json_reader.json" got = cudf.read_json(hd_fpath, engine="cudf", orient="records", lines=True) # Read pandas from byte buffer with hdfs.open(basedir + "/test_json_reader.json") as f: expect = pd.read_json(f, lines=True) assert_eq(expect, got) @pytest.mark.parametrize("test_url", [False, True]) def test_read_orc(datadir, hdfs, test_url): fname = datadir / "orc" / "TestOrcFile.testSnappy.orc" # Read from local file system as buffer with open(fname, mode="rb") as f: buffer = BytesIO(f.read()) # Write to hdfs hdfs.upload(basedir + "/file.orc", buffer) if test_url: hd_fpath = f"hdfs://{host}:{port}{basedir}/file.orc" else: hd_fpath = f"hdfs://{basedir}/file.orc" got = cudf.read_orc(hd_fpath) expect = pd.read_orc(buffer) assert_eq(expect, got) @pytest.mark.parametrize("test_url", [False, True]) def test_write_orc(pdf, hdfs, test_url): # Orc writer doesn't support writing unsigned ints pdf["Integer2"] = pdf["Integer2"].astype("int64") gdf = cudf.from_pandas(pdf) if test_url: hd_fpath = "hdfs://{}:{}{}/test_orc_writer.orc".format( host, port, basedir ) else: hd_fpath = f"hdfs://{basedir}/test_orc_writer.orc" gdf.to_orc(hd_fpath) assert hdfs.exists(f"{basedir}/test_orc_writer.orc") with hdfs.open(f"{basedir}/test_orc_writer.orc", mode="rb") as f: got = pd.read_orc(f) assert_eq(pdf, got) @pytest.mark.parametrize("test_url", [False, True]) def test_read_avro(datadir, hdfs, test_url): fname = datadir / "avro" / "example.avro" # Read from local file system as buffer with open(fname, mode="rb") as f: buffer = BytesIO(f.read()) # Write to hdfs hdfs.upload(basedir + "/file.avro", buffer) if test_url: hd_fpath = f"hdfs://{host}:{port}{basedir}/file.avro" else: hd_fpath = f"hdfs://{basedir}/file.avro" got = cudf.read_avro(hd_fpath) with open(fname, mode="rb") as f: expect = pd.DataFrame.from_records(fastavro.reader(f)) for col in expect.columns: expect[col] = expect[col].astype(got[col].dtype) assert_eq(expect, got) def test_storage_options(tmpdir, pdf, hdfs): fname = tmpdir.mkdir("csv").join("file.csv") # Write to local file system pdf.to_csv(fname) # 
Read from local file system as buffer with open(fname, mode="rb") as f: buffer = BytesIO(f.read()) # Write to hdfs hdfs.upload(basedir + "/file.csv", buffer) hd_fpath = f"hdfs://{basedir}/file.csv" storage_options = {"host": host, "port": port} got = cudf.read_csv(hd_fpath, storage_options=storage_options) # Read pandas from byte buffer with hdfs.open(basedir + "/file.csv") as f: expect = pd.read_csv(f) assert_eq(expect, got) def test_storage_options_error(tmpdir, pdf, hdfs): fname = tmpdir.mkdir("csv").join("file.csv") # Write to local file system pdf.to_csv(fname) # Read from local file system as buffer with open(fname, mode="rb") as f: buffer = BytesIO(f.read()) # Write to hdfs hdfs.upload(basedir + "/file.csv", buffer) hd_fpath = f"hdfs://{host}:{port}{basedir}/file.avro" storage_options = {"host": host, "port": port} with pytest.raises(KeyError): cudf.read_csv(hd_fpath, storage_options=storage_options)
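
# Illustrative helper sketch (hypothetical, an addition to the original
# module): every reader test above repeats the same three steps -- write
# locally with pandas, slurp the bytes, and push them to HDFS -- so a
# small helper plus a smoke test make the pattern explicit.
def _upload_local_file(hdfs, local_path, hdfs_name):
    # upload() accepts any file-like object, so an in-memory copy of the
    # locally written file is enough; no HDFS-side staging is needed.
    with open(local_path, mode="rb") as f:
        buffer = BytesIO(f.read())
    hdfs.upload(basedir + "/" + hdfs_name, buffer)
    return f"hdfs://{basedir}/{hdfs_name}"


def test_upload_helper_roundtrip_sketch(tmpdir, pdf, hdfs):
    fname = tmpdir.mkdir("csv").join("helper.csv")
    pdf.to_csv(fname)
    hd_fpath = _upload_local_file(hdfs, fname, "helper_roundtrip.csv")
    got = cudf.read_csv(hd_fpath)
    with hdfs.open(basedir + "/helper_roundtrip.csv") as f:
        expect = pd.read_csv(f)
    assert_eq(expect, got)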
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_applymap.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION. import pytest from cudf import NA, DataFrame from cudf.testing import _utils as utils @pytest.mark.parametrize( "data", [ {"a": [1, 2, 3], "b": [4, 5, 6]}, {"a": [1, 2, 3], "b": [1.0, 2.0, 3.0]}, {"a": [1, 2, 3], "b": [True, False, True]}, {"a": [1, NA, 2], "b": [NA, 4, NA]}, ], ) @pytest.mark.parametrize( "func", [ lambda x: x + 1, lambda x: x - 0.5, lambda x: 2 if x is NA else 2 + (x + 1) / 4.1, lambda x: 42, ], ) @pytest.mark.parametrize("na_action", [None, "ignore"]) def test_applymap_dataframe(data, func, na_action): gdf = DataFrame(data) pdf = gdf.to_pandas(nullable=True) expect = pdf.applymap(func, na_action=na_action) got = gdf.applymap(func, na_action=na_action) utils.assert_eq(expect, got, check_dtype=False) def test_applymap_raise_cases(): df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) def f(x, some_kwarg=0): return x + some_kwarg with pytest.raises(NotImplementedError): df.applymap(f, some_kwarg=1) with pytest.raises(ValueError): df.applymap(f, na_action="some_invalid_option")
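
# Illustrative sketch (an addition, not part of the original module):
# with na_action="ignore" the UDF is never invoked on missing rows, so
# nulls pass through unchanged while valid rows are transformed.
def test_applymap_na_action_ignore_sketch():
    gdf = DataFrame({"a": [1, NA, 3]})
    got = gdf.applymap(lambda x: x + 1, na_action="ignore")
    expect = DataFrame({"a": [2, NA, 4]})
    utils.assert_eq(expect, got, check_dtype=False)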
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_categorical.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. import operator import string import warnings from contextlib import contextmanager from textwrap import dedent import numpy as np import pandas as pd import pytest import cudf from cudf.core._compat import PANDAS_GE_134 from cudf.testing._utils import ( NUMERIC_TYPES, assert_eq, assert_exceptions_equal, expect_warning_if, ) @contextmanager def _hide_deprecated_pandas_categorical_inplace_warnings(function_name): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", ( "The `inplace` parameter in " f"pandas.Categorical.{function_name} is deprecated and will " "be removed in a future version." ), category=FutureWarning, ) yield @contextmanager def _hide_cudf_safe_casting_warning(): with warnings.catch_warnings(): warnings.filterwarnings( "ignore", "Can't safely cast column", category=UserWarning, ) yield @pytest.fixture def pd_str_cat(): categories = list("abc") codes = [0, 0, 1, 0, 1, 2, 0, 1, 1, 2] return pd.Categorical.from_codes(codes, categories=categories) def test_categorical_basic(): cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"]) cudf_cat = cudf.Index(cat) pdsr = pd.Series(cat, index=["p", "q", "r", "s", "t"]) sr = cudf.Series(cat, index=["p", "q", "r", "s", "t"]) assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False) # Test attributes assert_eq(pdsr.cat.categories, sr.cat.categories) assert pdsr.cat.ordered == sr.cat.ordered np.testing.assert_array_equal( pdsr.cat.codes.values, sr.cat.codes.to_numpy() ) string = str(sr) expect_str = """ p a q a r b s c t a """ assert all(x == y for x, y in zip(string.split(), expect_str.split())) assert_eq(cat.codes, cudf_cat.codes.to_numpy()) def test_categorical_integer(): cat = pd.Categorical(["a", "_", "_", "c", "a"], categories=["a", "b", "c"]) pdsr = pd.Series(cat) sr = cudf.Series(cat) np.testing.assert_array_equal( cat.codes, sr.cat.codes.astype(cat.codes.dtype).fillna(-1).to_numpy() ) assert sr.null_count == 2 np.testing.assert_array_equal( pdsr.cat.codes.values, sr.cat.codes.astype(pdsr.cat.codes.dtype).fillna(-1).to_numpy(), ) expect_str = dedent( """\ 0 a 1 <NA> 2 <NA> 3 c 4 a dtype: category Categories (3, object): ['a', 'b', 'c']""" ) assert str(sr) == expect_str def test_categorical_compare_unordered(): cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"]) pdsr = pd.Series(cat) sr = cudf.Series(cat) # test equal out = sr == sr assert out.dtype == np.bool_ assert type(out[0]) == np.bool_ assert np.all(out.to_numpy()) assert np.all(pdsr == pdsr) # test inequality out = sr != sr assert not np.any(out.to_numpy()) assert not np.any(pdsr != pdsr) assert not pdsr.cat.ordered assert not sr.cat.ordered # test using ordered operators assert_exceptions_equal( lfunc=operator.lt, rfunc=operator.lt, lfunc_args_and_kwargs=([pdsr, pdsr],), rfunc_args_and_kwargs=([sr, sr],), ) def test_categorical_compare_ordered(): cat1 = pd.Categorical( ["a", "a", "b", "c", "a"], categories=["a", "b", "c"], ordered=True ) pdsr1 = pd.Series(cat1) sr1 = cudf.Series(cat1) cat2 = pd.Categorical( ["a", "b", "a", "c", "b"], categories=["a", "b", "c"], ordered=True ) pdsr2 = pd.Series(cat2) sr2 = cudf.Series(cat2) # test equal out = sr1 == sr1 assert out.dtype == np.bool_ assert type(out[0]) == np.bool_ assert np.all(out.to_numpy()) assert np.all(pdsr1 == pdsr1) # test inequality out = sr1 != sr1 assert not np.any(out.to_numpy()) assert not np.any(pdsr1 != pdsr1) assert pdsr1.cat.ordered assert sr1.cat.ordered # test using ordered operators 
np.testing.assert_array_equal(pdsr1 < pdsr2, (sr1 < sr2).to_numpy()) np.testing.assert_array_equal(pdsr1 > pdsr2, (sr1 > sr2).to_numpy()) def test_categorical_binary_add(): cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"]) pdsr = pd.Series(cat) sr = cudf.Series(cat) assert_exceptions_equal( lfunc=operator.add, rfunc=operator.add, lfunc_args_and_kwargs=([pdsr, pdsr],), rfunc_args_and_kwargs=([sr, sr],), ) def test_categorical_element_indexing(): """ Element indexing to a cat column must give the underlying object not the numerical index. """ cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"]) pdsr = pd.Series(cat) sr = cudf.Series(cat) assert_eq(pdsr, sr) assert_eq(pdsr.cat.codes, sr.cat.codes, check_dtype=False) def test_categorical_masking(): """ Test common operation for getting a all rows that matches a certain category. """ cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"]) pdsr = pd.Series(cat) sr = cudf.Series(cat) # check scalar comparison expect_matches = pdsr == "a" got_matches = sr == "a" np.testing.assert_array_equal( expect_matches.values, got_matches.to_numpy() ) # mask series expect_masked = pdsr[expect_matches] got_masked = sr[got_matches] assert len(expect_masked) == len(got_masked) assert len(expect_masked) == got_masked.valid_count assert_eq(got_masked, expect_masked) def test_df_cat_set_index(): df = cudf.DataFrame() df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc")) df["b"] = np.arange(len(df)) got = df.set_index("a") pddf = df.to_pandas(nullable_pd_dtype=False) expect = pddf.set_index("a") assert_eq(got, expect) def test_df_cat_sort_index(): df = cudf.DataFrame() df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc")) df["b"] = np.arange(len(df)) got = df.set_index("a").sort_index() expect = df.to_pandas(nullable_pd_dtype=False).set_index("a").sort_index() assert_eq(got, expect) def test_cat_series_binop_error(): df = cudf.DataFrame() df["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc")) df["b"] = np.arange(len(df)) pdf = df.to_pandas() # lhs is categorical assert_exceptions_equal( lfunc=operator.add, rfunc=operator.add, lfunc_args_and_kwargs=([pdf["a"], pdf["b"]],), rfunc_args_and_kwargs=([df["a"], df["b"]],), ) # lhs is numerical assert_exceptions_equal( lfunc=operator.add, rfunc=operator.add, lfunc_args_and_kwargs=([pdf["b"], pdf["a"]],), rfunc_args_and_kwargs=([df["b"], df["a"]],), ) @pytest.mark.parametrize("num_elements", [10, 100, 1000]) def test_categorical_unique(num_elements): # create categorical series np.random.seed(12) pd_cat = pd.Categorical( pd.Series( np.random.choice( list(string.ascii_letters + string.digits), num_elements ), dtype="category", ) ) # gdf gdf = cudf.DataFrame() gdf["a"] = cudf.Series.from_categorical(pd_cat) gdf_unique_sorted = np.sort(gdf["a"].unique().to_pandas()) # pandas pdf = pd.DataFrame() pdf["a"] = pd_cat pdf_unique_sorted = np.sort(pdf["a"].unique()) # verify np.testing.assert_array_equal(pdf_unique_sorted, gdf_unique_sorted) @pytest.mark.parametrize("nelem", [20, 50, 100]) def test_categorical_unique_count(nelem): # create categorical series np.random.seed(12) pd_cat = pd.Categorical( pd.Series( np.random.choice( list(string.ascii_letters + string.digits), nelem ), dtype="category", ) ) # gdf gdf = cudf.DataFrame() gdf["a"] = cudf.Series.from_categorical(pd_cat) gdf_unique_count = gdf["a"].nunique() # pandas pdf = pd.DataFrame() pdf["a"] = pd_cat pdf_unique = pdf["a"].unique() # verify assert 
gdf_unique_count == len(pdf_unique) def test_categorical_empty(): cat = pd.Categorical([]) pdsr = pd.Series(cat) sr = cudf.Series(cat) np.testing.assert_array_equal(cat.codes, sr.cat.codes.to_numpy()) # Test attributes assert_eq(pdsr.cat.categories, sr.cat.categories) assert pdsr.cat.ordered == sr.cat.ordered np.testing.assert_array_equal( pdsr.cat.codes.values, sr.cat.codes.to_numpy() ) def test_categorical_set_categories(): cat = pd.Categorical(["a", "a", "b", "c", "a"], categories=["a", "b", "c"]) psr = pd.Series(cat) sr = cudf.Series.from_categorical(cat) # adding category expect = psr.cat.set_categories(["a", "b", "c", "d"]) got = sr.cat.set_categories(["a", "b", "c", "d"]) assert_eq(expect, got) # removing category expect = psr.cat.set_categories(["a", "b"]) got = sr.cat.set_categories(["a", "b"]) assert_eq(expect, got) def test_categorical_set_categories_preserves_order(): series = pd.Series([1, 0, 0, 0, 2]).astype("category") # reassigning categories should preserve element ordering assert_eq( series.cat.set_categories([1, 2]), cudf.Series(series).cat.set_categories([1, 2]), ) @pytest.mark.parametrize("inplace", [True, False]) def test_categorical_as_ordered(pd_str_cat, inplace): pd_sr = pd.Series(pd_str_cat.copy().set_ordered(False)) cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(False)) assert cd_sr.cat.ordered is False assert cd_sr.cat.ordered == pd_sr.cat.ordered # pandas internally uses a deprecated call to set_ordered(inplace=inplace) # inside as_ordered. with pytest.warns(FutureWarning): pd_sr_1 = pd_sr.cat.as_ordered(inplace=inplace) with expect_warning_if(inplace, FutureWarning): cd_sr_1 = cd_sr.cat.as_ordered(inplace=inplace) if inplace: pd_sr_1 = pd_sr cd_sr_1 = cd_sr assert cd_sr_1.cat.ordered is True assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered assert str(cd_sr_1) == str(pd_sr_1) @pytest.mark.parametrize("inplace", [True, False]) def test_categorical_as_unordered(pd_str_cat, inplace): pd_sr = pd.Series(pd_str_cat.copy().set_ordered(True)) cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(True)) assert cd_sr.cat.ordered is True assert cd_sr.cat.ordered == pd_sr.cat.ordered # pandas internally uses a deprecated call to set_ordered(inplace=inplace) # inside as_unordered. 
with pytest.warns(FutureWarning): pd_sr_1 = pd_sr.cat.as_unordered(inplace=inplace) with expect_warning_if(inplace, FutureWarning): cd_sr_1 = cd_sr.cat.as_unordered(inplace=inplace) if inplace: pd_sr_1 = pd_sr cd_sr_1 = cd_sr assert cd_sr_1.cat.ordered is False assert cd_sr_1.cat.ordered == pd_sr_1.cat.ordered assert str(cd_sr_1) == str(pd_sr_1) @pytest.mark.parametrize("from_ordered", [True, False]) @pytest.mark.parametrize("to_ordered", [True, False]) @pytest.mark.parametrize( "inplace", [ pytest.param( True, marks=pytest.mark.skipif( condition=not PANDAS_GE_134, reason="https://github.com/pandas-dev/pandas/issues/43232", ), ), False, ], ) def test_categorical_reorder_categories( pd_str_cat, from_ordered, to_ordered, inplace ): pd_sr = pd.Series(pd_str_cat.copy().set_ordered(from_ordered)) cd_sr = cudf.Series(pd_str_cat.copy().set_ordered(from_ordered)) assert_eq(pd_sr, cd_sr) assert str(pd_sr) == str(cd_sr) kwargs = dict(ordered=to_ordered, inplace=inplace) with _hide_deprecated_pandas_categorical_inplace_warnings( "reorder_categories" ): pd_sr_1 = pd_sr.cat.reorder_categories(list("cba"), **kwargs) if inplace: with pytest.warns(FutureWarning): cd_sr_1 = cd_sr.cat.reorder_categories(list("cba"), **kwargs) pd_sr_1 = pd_sr cd_sr_1 = cd_sr else: cd_sr_1 = cd_sr.cat.reorder_categories(list("cba"), **kwargs) assert_eq(pd_sr_1, cd_sr_1) assert str(cd_sr_1) == str(pd_sr_1) @pytest.mark.parametrize( "inplace", [ pytest.param( True, marks=pytest.mark.skipif( condition=not PANDAS_GE_134, reason="https://github.com/pandas-dev/pandas/issues/43232", ), ), False, ], ) def test_categorical_add_categories(pd_str_cat, inplace): pd_sr = pd.Series(pd_str_cat.copy()) cd_sr = cudf.Series(pd_str_cat.copy()) assert_eq(pd_sr, cd_sr) assert str(pd_sr) == str(cd_sr) with _hide_deprecated_pandas_categorical_inplace_warnings( "add_categories" ): pd_sr_1 = pd_sr.cat.add_categories(["d"], inplace=inplace) if inplace: with pytest.warns(FutureWarning): cd_sr_1 = cd_sr.cat.add_categories(["d"], inplace=inplace) pd_sr_1 = pd_sr cd_sr_1 = cd_sr else: cd_sr_1 = cd_sr.cat.add_categories(["d"], inplace=inplace) assert "d" in pd_sr_1.cat.categories.to_list() assert "d" in cd_sr_1.cat.categories.to_pandas().to_list() assert_eq(pd_sr_1, cd_sr_1) @pytest.mark.parametrize( "inplace", [ pytest.param( True, marks=pytest.mark.skipif( condition=not PANDAS_GE_134, reason="https://github.com/pandas-dev/pandas/issues/43232", ), ), False, ], ) def test_categorical_remove_categories(pd_str_cat, inplace): pd_sr = pd.Series(pd_str_cat.copy()) cd_sr = cudf.Series(pd_str_cat.copy()) assert_eq(pd_sr, cd_sr) assert str(pd_sr) == str(cd_sr) with _hide_deprecated_pandas_categorical_inplace_warnings( "remove_categories" ): pd_sr_1 = pd_sr.cat.remove_categories(["a"], inplace=inplace) if inplace: with pytest.warns(FutureWarning): cd_sr_1 = cd_sr.cat.remove_categories(["a"], inplace=inplace) pd_sr_1 = pd_sr cd_sr_1 = cd_sr else: cd_sr_1 = cd_sr.cat.remove_categories(["a"], inplace=inplace) assert "a" not in pd_sr_1.cat.categories.to_list() assert "a" not in cd_sr_1.cat.categories.to_pandas().to_list() assert_eq(pd_sr_1, cd_sr_1) # test using ordered operators with _hide_deprecated_pandas_categorical_inplace_warnings( "remove_categories" ) as _, pytest.warns(FutureWarning) as _: assert_exceptions_equal( lfunc=cd_sr.to_pandas().cat.remove_categories, rfunc=cd_sr.cat.remove_categories, lfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}), rfunc_args_and_kwargs=([["a", "d"]], {"inplace": inplace}), ) def test_categorical_dataframe_slice_copy(): 
pdf = pd.DataFrame({"g": pd.Series(["a", "b", "z"], dtype="category")}) gdf = cudf.from_pandas(pdf) exp = pdf[1:].copy() gdf = gdf[1:].copy() assert_eq(exp, gdf) @pytest.mark.parametrize( "data", [ pd.Series([1, 2, 3, 89]), pd.Series([1, 2, 3, 89, 3, 1, 89], dtype="category"), pd.Series(["1", "2", "3", "4", "5"], dtype="category"), pd.Series(["1.0", "2.5", "3.001", "9"], dtype="category"), pd.Series(["1", "2", "3", None, "4", "5"], dtype="category"), pd.Series(["1.0", "2.5", "3.001", None, "9"], dtype="category"), pd.Series(["a", "b", "c", "c", "b", "a", "b", "b"]), pd.Series(["aa", "b", "c", "c", "bb", "bb", "a", "b", "b"]), pd.Series([1, 2, 3, 89, None, np.nan, np.NaN], dtype="float64"), pd.Series([1, 2, 3, 89], dtype="float64"), pd.Series([1, 2.5, 3.001, 89], dtype="float64"), pd.Series([None, None, None]), pd.Series([], dtype="float64"), ], ) @pytest.mark.parametrize( "cat_type", [ pd.CategoricalDtype(categories=["aa", "bb", "cc"]), pd.CategoricalDtype(categories=[2, 4, 10, 100]), pd.CategoricalDtype(categories=["aa", "bb", "c"]), pd.CategoricalDtype(categories=["a", "bb", "c"]), pd.CategoricalDtype(categories=["a", "b", "c"]), pd.CategoricalDtype(categories=["1", "2", "3", "4"]), pd.CategoricalDtype(categories=["1.0", "2.5", "3.001", "9"]), pd.CategoricalDtype(categories=[]), ], ) def test_categorical_typecast(data, cat_type): pd_data = data.copy() gd_data = cudf.from_pandas(data) assert_eq(pd_data.astype(cat_type), gd_data.astype(cat_type)) @pytest.mark.parametrize( "data", [ pd.Series([1, 2, 3, 89]), pd.Series(["a", "b", "c", "c", "b", "a", "b", "b"]), pd.Series(["aa", "b", "c", "c", "bb", "bb", "a", "b", "b"]), pd.Series([1, 2, 3, 89, None, np.nan, np.NaN], dtype="float64"), pd.Series([1, 2, 3, 89], dtype="float64"), pd.Series([1, 2.5, 3.001, 89], dtype="float64"), pd.Series([None, None, None]), pd.Series([], dtype="float64"), ], ) @pytest.mark.parametrize( "new_categories", [ ["aa", "bb", "cc"], [2, 4, 10, 100], ["aa", "bb", "c"], ["a", "bb", "c"], ["a", "b", "c"], [], pd.Series(["a", "b", "c"]), pd.Series(["a", "b", "c"], dtype="category"), pd.Series([-100, 10, 11, 0, 1, 2], dtype="category"), ], ) def test_categorical_set_categories_categoricals(data, new_categories): pd_data = data.copy().astype("category") gd_data = cudf.from_pandas(pd_data) expected = pd_data.cat.set_categories(new_categories=new_categories) with _hide_cudf_safe_casting_warning(): actual = gd_data.cat.set_categories(new_categories=new_categories) assert_eq(expected, actual) expected = pd_data.cat.set_categories( new_categories=pd.Series(new_categories, dtype="category") ) with _hide_cudf_safe_casting_warning(): actual = gd_data.cat.set_categories( new_categories=cudf.Series(new_categories, dtype="category") ) assert_eq(expected, actual) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4], ["a", "1", "2", "1", "a"], pd.Series(["a", "1", "22", "1", "aa"]), pd.Series(["a", "1", "22", "1", "aa"], dtype="category"), pd.Series([1, 2, 3, -4], dtype="int64"), pd.Series([1, 2, 3, 4], dtype="uint64"), pd.Series([1, 2.3, 3, 4], dtype="float"), np.asarray([0, 2, 1]), [None, 1, None, 2, None], [], ], ) @pytest.mark.parametrize( "dtype", [ pd.CategoricalDtype(categories=["aa", "bb", "cc"]), pd.CategoricalDtype(categories=[2, 4, 10, 100]), pd.CategoricalDtype(categories=["aa", "bb", "c"]), pd.CategoricalDtype(categories=["a", "bb", "c"]), pd.CategoricalDtype(categories=["a", "b", "c"]), pd.CategoricalDtype(categories=["22", "b", "c"]), pd.CategoricalDtype(categories=[]), ], ) def test_categorical_creation(data, dtype): 
expected = pd.Series(data, dtype=dtype) got = cudf.Series(data, dtype=dtype) assert_eq(expected, got) got = cudf.Series(data, dtype=cudf.from_pandas(dtype)) assert_eq(expected, got) expected = pd.Series(data, dtype="category") got = cudf.Series(data, dtype="category") assert_eq(expected, got) @pytest.mark.parametrize( "categories", [ [], [1, 2, 3], pd.Series(["a", "c", "b"], dtype="category"), pd.Series([1, 2, 3, 4, -100], dtype="category"), ], ) @pytest.mark.parametrize("ordered", [True, False]) def test_categorical_dtype(categories, ordered): expected = pd.CategoricalDtype(categories=categories, ordered=ordered) got = cudf.CategoricalDtype(categories=categories, ordered=ordered) assert_eq(expected, got) expected = pd.CategoricalDtype(categories=categories) got = cudf.CategoricalDtype(categories=categories) assert_eq(expected, got) @pytest.mark.parametrize( ("data", "expected"), [ (cudf.Series([1]), np.uint8), (cudf.Series([1, None]), np.uint8), (cudf.Series(np.arange(np.iinfo(np.int8).max)), np.uint8), ( cudf.Series(np.append(np.arange(np.iinfo(np.int8).max), [None])), np.uint8, ), (cudf.Series(np.arange(np.iinfo(np.int16).max)), np.uint16), ( cudf.Series(np.append(np.arange(np.iinfo(np.int16).max), [None])), np.uint16, ), (cudf.Series(np.arange(np.iinfo(np.uint8).max)), np.uint8), ( cudf.Series(np.append(np.arange(np.iinfo(np.uint8).max), [None])), np.uint8, ), (cudf.Series(np.arange(np.iinfo(np.uint16).max)), np.uint16), ( cudf.Series(np.append(np.arange(np.iinfo(np.uint16).max), [None])), np.uint16, ), ], ) def test_astype_dtype(data, expected): got = data.astype("category").cat.codes.dtype np.testing.assert_equal(got, expected) @pytest.mark.parametrize( "data,add", [ ([1, 2, 3], [100, 11, 12]), ([1, 2, 3], [0.01, 9.7, 15.0]), ([0.0, 6.7, 10.0], [100, 11, 12]), ([0.0, 6.7, 10.0], [0.01, 9.7, 15.0]), (["a", "bd", "ef"], ["asdfsdf", "bddf", "eff"]), ([1, 2, 3], []), ([0.0, 6.7, 10.0], []), (["a", "bd", "ef"], []), ], ) def test_add_categories(data, add): pds = pd.Series(data, dtype="category") gds = cudf.Series(data, dtype="category") expected = pds.cat.add_categories(add) with _hide_cudf_safe_casting_warning(): actual = gds.cat.add_categories(add) assert_eq( expected.cat.codes, actual.cat.codes.astype(expected.cat.codes.dtype) ) # Need to type-cast pandas object to str due to mixed-type # support in "object" assert_eq( expected.cat.categories.astype("str") if (expected.cat.categories.dtype == "object") else expected.cat.categories, actual.cat.categories, ) @pytest.mark.parametrize( "data,add", [ ([1, 2, 3], [1, 3, 11]), ([0.0, 6.7, 10.0], [1, 2, 0.0]), (["a", "bd", "ef"], ["a", "bd", "a"]), ], ) def test_add_categories_error(data, add): pds = pd.Series(data, dtype="category") gds = cudf.Series(data, dtype="category") assert_exceptions_equal( pds.cat.add_categories, gds.cat.add_categories, ([add],), ([add],), ) def test_add_categories_mixed_error(): gds = cudf.Series(["a", "bd", "ef"], dtype="category") with pytest.raises(TypeError): gds.cat.add_categories([1, 2, 3]) gds = cudf.Series([1, 2, 3], dtype="category") with pytest.raises(TypeError): gds.cat.add_categories(["a", "bd", "ef"]) @pytest.mark.parametrize( "data", [ [1, 2, 3, 4], ["a", "1", "2", "1", "a"], pd.Series(["a", "1", "22", "1", "aa"]), pd.Series(["a", "1", "22", "1", "aa"], dtype="category"), pd.Series([1, 2, 3, 4], dtype="int64"), pd.Series([1, 2.3, 3, 4], dtype="float"), [None, 1, None, 2, None], ["a"], ], ) @pytest.mark.parametrize( "cat_dtype", [ pd.CategoricalDtype(categories=["aa", "bb", "cc"]), 
pd.CategoricalDtype(categories=[2, 4, 10, 100]), pd.CategoricalDtype(categories=["aa", "bb", "c"]), pd.CategoricalDtype(categories=["a", "bb", "c"]), pd.CategoricalDtype(categories=["a", "b", "c"]), pd.CategoricalDtype(categories=["22", "b", "c"]), pd.CategoricalDtype(categories=["a"]), ], ) def test_categorical_assignment(data, cat_dtype): pd_df = pd.DataFrame() pd_df["a"] = np.ones(len(data)) cd_df = cudf.from_pandas(pd_df) pd_cat_series = pd.Series(data, dtype=cat_dtype) # assign categorical series pd_df.assign(cat_col=pd_cat_series) cd_df.assign(cat_col=pd_cat_series) assert_eq(pd_df, cd_df) # assign categorical array # needed for dask_cudf support for including file name # as a categorical column # see issue: https://github.com/rapidsai/cudf/issues/2269 pd_df = pd.DataFrame() pd_df["a"] = np.ones(len(data)) cd_df = cudf.from_pandas(pd_df) pd_categorical = pd.Categorical(data, dtype=cat_dtype) pd_df.assign(cat_col=pd_categorical) cd_df.assign(cat_col=pd_categorical) assert_eq(pd_df, cd_df) def test_categorical_allow_nan(): gs = cudf.Series([1, 2, np.nan, 10, np.nan, None], nan_as_null=False) gs = gs.astype("category") expected_codes = cudf.Series([0, 1, 3, 2, 3, None], dtype="uint8") assert_eq(expected_codes, gs.cat.codes) expected_categories = cudf.Index([1.0, 2.0, 10.0, np.nan], dtype="float64") assert_eq(expected_categories, gs.cat.categories) actual_ps = gs.to_pandas() expected_ps = pd.Series( [1.0, 2.0, np.nan, 10.0, np.nan, np.nan], dtype="category" ) assert_eq(actual_ps, expected_ps) def test_categorical_setitem_with_nan(): gs = cudf.Series( [1, 2, np.nan, 10, np.nan, None], nan_as_null=False ).astype("category") gs[[1, 3]] = np.nan expected_series = cudf.Series( [1, np.nan, np.nan, np.nan, np.nan, None], nan_as_null=False ).astype(gs.dtype) assert_eq(gs, expected_series) @pytest.mark.parametrize("dtype", list(NUMERIC_TYPES) + ["object"]) @pytest.mark.parametrize("input_obj", [[1, cudf.NA, 3]]) def test_series_construction_with_nulls(input_obj, dtype): dtype = cudf.dtype(dtype) input_obj = [ dtype.type(v) if v is not cudf.NA else cudf.NA for v in input_obj ] expect = pd.Series(input_obj, dtype="category") got = cudf.Series(input_obj, dtype="category").to_pandas() assert_eq(expect, got) @pytest.mark.parametrize( "data", [ {"a": cudf.Series(["a", "b", "c", "a", "c", "b"]).astype("category")}, { "a": cudf.Series(["a", "a", "b", "b"]).astype("category"), "b": cudf.Series(["b", "b", "c", "c"]).astype("category"), "c": cudf.Series(["c", "c", "a", "a"]).astype("category"), }, { "a": cudf.Series(["a", None, "b", "b"]).astype("category"), "b": cudf.Series(["b", "b", None, "c"]).astype("category"), "c": cudf.Series(["c", "c", "a", None]).astype("category"), }, ], ) def test_serialize_categorical_columns(data): df = cudf.DataFrame(data) recreated = df.__class__.deserialize(*df.serialize()) assert_eq(recreated, df) @pytest.mark.parametrize( "data", [["$ 1", "$ 2", "hello"], ["($) 1", "( 2", "hello", "^1$"]] ) @pytest.mark.parametrize("value", ["$ 1", "hello", "$", "^1$"]) def test_categorical_string_index_contains(data, value): idx = cudf.CategoricalIndex(data) pidx = idx.to_pandas() assert_eq(value in idx, value in pidx) def test_categorical_index_with_dtype(): dtype = cudf.CategoricalDtype(categories=["a", "z", "c"]) gi = cudf.Index(["z", "c", "a"], dtype=dtype) pi = pd.Index(["z", "c", "a"], dtype=dtype.to_pandas()) assert_eq(gi, pi) assert_eq(gi.dtype, pi.dtype) assert_eq(gi.dtype.categories, pi.dtype.categories) def test_cat_iterate_error(): s = cudf.Series([1, 2, 3], dtype="category") 
with pytest.raises(TypeError): iter(s.cat) @pytest.mark.parametrize("ordered", [True, False]) def test_empty_series_category_cast(ordered): dtype = cudf.CategoricalDtype(ordered=ordered) ps = pd.Series([], dtype="str") gs = cudf.from_pandas(ps) expected = ps.astype(dtype.to_pandas()) actual = gs.astype(dtype) assert_eq(expected, actual) assert_eq(expected.dtype.ordered, actual.dtype.ordered)
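# A minimal standalone sketch (not part of the test suite above) of the
# cudf categorical-accessor patterns those tests exercise; printed values
# assume the usual lexical category ordering.
import cudf

s = cudf.Series(["a", "b", "a", "c"], dtype="category")
print(s.cat.categories)           # the three observed categories
print(s.cat.codes.to_numpy())     # small unsigned integer codes per row
s2 = s.cat.add_categories(["d"])  # returns a new Series; "d" is unused
assert "d" in s2.cat.categories.to_pandas().to_list()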
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_feather.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

import os
from string import ascii_letters

import numpy as np
import pandas as pd
import pyarrow as pa
import pytest

import cudf
from cudf.testing._utils import NUMERIC_TYPES, assert_eq


@pytest.fixture(params=[0, 1, 10, 100])
def pdf(request):
    types = NUMERIC_TYPES + ["bool"]
    nrows = request.param

    # Create a pandas dataframe with random data of mixed types
    test_pdf = pd.DataFrame(
        {
            f"col_{typ}": np.random.randint(0, nrows, nrows).astype(typ)
            for typ in types
        }
    )
    # Delete the name of the column index, and rename the row index
    test_pdf.columns.name = None
    test_pdf.index.name = "index"

    # Create non-numeric categorical data; otherwise it may get typecast
    data = [ascii_letters[np.random.randint(0, 52)] for i in range(nrows)]
    test_pdf["col_category"] = pd.Series(data, dtype="category")

    # Feather can't handle indexes properly
    test_pdf = test_pdf.reset_index(drop=True)
    test_pdf.index.name = None

    return test_pdf


@pytest.fixture
def gdf(pdf):
    return cudf.DataFrame.from_pandas(pdf)


@pytest.fixture
def feather_file(tmp_path_factory, pdf):
    fname = tmp_path_factory.mktemp("feather") / "test.feather"
    pdf.to_feather(fname)
    return fname


@pytest.mark.filterwarnings("ignore:Using CPU")
@pytest.mark.filterwarnings("ignore:Strings are not yet supported")
@pytest.mark.parametrize(
    "columns",
    [["col_int8"], ["col_category"], ["col_int32", "col_float32"], None],
)
def test_feather_reader(feather_file, columns):
    expect = pa.feather.read_table(feather_file, columns=columns).to_pandas()
    got = (
        cudf.read_feather(feather_file, columns=columns)
        .to_arrow(preserve_index=False)
        .to_pandas()
    )

    assert_eq(expect, got, check_categorical=False)


@pytest.mark.filterwarnings("ignore:Using CPU")
def test_feather_writer(tmpdir, pdf, gdf):
    pdf_fname = tmpdir.join("pdf.feather")
    gdf_fname = tmpdir.join("gdf.feather")

    pdf.to_feather(pdf_fname)
    gdf.to_feather(gdf_fname)

    assert os.path.exists(pdf_fname)
    assert os.path.exists(gdf_fname)

    expect = pa.feather.read_table(pdf_fname)
    got = pa.feather.read_table(gdf_fname)

    assert pa.Table.equals(expect, got)
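# A hedged usage sketch of the feather round trip the tests above cover;
# the file name is illustrative only.
import cudf

gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [0.5, 1.5, 2.5]})
gdf.to_feather("example.feather")
roundtrip = cudf.read_feather("example.feather")
assert roundtrip.equals(gdf)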
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_cuda_array_interface.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION. import types from contextlib import ExitStack as does_not_raise import cupy import numpy as np import pandas as pd import pytest from numba import cuda import cudf from cudf.core.buffer.spill_manager import get_global_manager from cudf.testing._utils import DATETIME_TYPES, NUMERIC_TYPES, assert_eq @pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES) @pytest.mark.parametrize("module", ["cupy", "numba"]) def test_cuda_array_interface_interop_in(dtype, module): np_data = np.arange(10).astype(dtype) expectation = does_not_raise() if module == "cupy": module_constructor = cupy.array if dtype in DATETIME_TYPES: expectation = pytest.raises(ValueError) elif module == "numba": module_constructor = cuda.to_device with expectation: module_data = module_constructor(np_data) pd_data = pd.Series(np_data) # Test using a specific function for __cuda_array_interface__ here cudf_data = cudf.Series(module_data) assert_eq(pd_data, cudf_data) gdf = cudf.DataFrame() gdf["test"] = module_data pd_data.name = "test" assert_eq(pd_data, gdf["test"]) @pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES + ["str"]) @pytest.mark.parametrize("module", ["cupy", "numba"]) def test_cuda_array_interface_interop_out(dtype, module): expectation = does_not_raise() if dtype == "str": expectation = pytest.raises(AttributeError) if module == "cupy": module_constructor = cupy.asarray def to_host_function(x): return cupy.asnumpy(x) elif module == "numba": module_constructor = cuda.as_cuda_array def to_host_function(x): return x.copy_to_host() with expectation: np_data = np.arange(10).astype(dtype) cudf_data = cudf.Series(np_data) assert isinstance(cudf_data.__cuda_array_interface__, dict) module_data = module_constructor(cudf_data) got = to_host_function(module_data) expect = np_data assert_eq(expect, got) @pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES) @pytest.mark.parametrize("module", ["cupy", "numba"]) def test_cuda_array_interface_interop_out_masked(dtype, module): expectation = does_not_raise() if module == "cupy": pytest.skip( "cupy doesn't support version 1 of " "`__cuda_array_interface__` yet" ) module_constructor = cupy.asarray def to_host_function(x): return cupy.asnumpy(x) elif module == "numba": expectation = pytest.raises(NotImplementedError) module_constructor = cuda.as_cuda_array def to_host_function(x): return x.copy_to_host() np_data = np.arange(10).astype("float64") np_data[[0, 2, 4, 6, 8]] = np.nan with expectation: cudf_data = cudf.Series(np_data).astype(dtype) assert isinstance(cudf_data.__cuda_array_interface__, dict) module_data = module_constructor(cudf_data) # noqa: F841 @pytest.mark.parametrize("dtype", NUMERIC_TYPES + DATETIME_TYPES) @pytest.mark.parametrize("nulls", ["all", "some", "bools", "none"]) @pytest.mark.parametrize("mask_type", ["bits", "bools"]) def test_cuda_array_interface_as_column(dtype, nulls, mask_type): sr = cudf.Series(np.arange(10)) if nulls == "some": mask = [ True, False, True, False, False, True, True, False, True, True, ] sr[sr[~np.asarray(mask)]] = None elif nulls == "all": sr[:] = None sr = sr.astype(dtype) obj = types.SimpleNamespace( __cuda_array_interface__=sr.__cuda_array_interface__ ) if mask_type == "bools": if nulls == "some": obj.__cuda_array_interface__["mask"] = cuda.to_device(mask) elif nulls == "all": obj.__cuda_array_interface__["mask"] = cuda.to_device([False] * 10) expect = sr got = cudf.Series(obj) assert_eq(expect, got) def test_column_from_ephemeral_cupy(): # Test that we 
keep a reference to the ephemeral # CuPy array. If we didn't, then `a` would end # up referring to the same memory as `b` due to # CuPy's caching allocator a = cudf.Series(cupy.asarray([1, 2, 3])) b = cudf.Series(cupy.asarray([1, 1, 1])) assert_eq(pd.Series([1, 2, 3]), a) assert_eq(pd.Series([1, 1, 1]), b) def test_column_from_ephemeral_cupy_try_lose_reference(): # Try to lose the reference we keep to the ephemeral # CuPy array a = cudf.Series(cupy.asarray([1, 2, 3]))._column a = cudf.core.column.as_column(a) b = cupy.asarray([1, 1, 1]) # noqa: F841 assert_eq(pd.Series([1, 2, 3]), a.to_pandas()) a = cudf.Series(cupy.asarray([1, 2, 3]))._column a.name = "b" b = cupy.asarray([1, 1, 1]) # noqa: F841 assert_eq(pd.Series([1, 2, 3]), a.to_pandas()) @pytest.mark.xfail( get_global_manager() is not None, reason=( "spilling doesn't support PyTorch, see " "`cudf.core.buffer.spillable_buffer.DelayedPointerTuple`" ), ) def test_cuda_array_interface_pytorch(): torch = pytest.importorskip("torch", minversion="1.6.0") if not torch.cuda.is_available(): pytest.skip("need gpu version of pytorch to be installed") series = cudf.Series([1, -1, 10, -56]) tensor = torch.tensor(series) got = cudf.Series(tensor) assert_eq(got, series) buffer = cudf.core.buffer.as_buffer(cupy.ones(10, dtype=np.bool_)) tensor = torch.tensor(buffer) got = cudf.Series(tensor, dtype=np.bool_) assert_eq(got, cudf.Series(buffer, dtype=np.bool_)) index = cudf.Index([], dtype="float64") tensor = torch.tensor(index) got = cudf.Index(tensor) assert_eq(got, index) index = cudf.core.index.RangeIndex(start=0, stop=100) tensor = torch.tensor(index) got = cudf.Series(tensor) assert_eq(got, cudf.Series(index)) index = cudf.Index([1, 2, 8, 6]) tensor = torch.tensor(index) got = cudf.Index(tensor) assert_eq(got, index) str_series = cudf.Series(["a", "g"]) with pytest.raises(NotImplementedError): str_series.__cuda_array_interface__ cat_series = str_series.astype("category") with pytest.raises(TypeError): cat_series.__cuda_array_interface__ def test_cai_after_indexing(): df = cudf.DataFrame({"a": [1, 2, 3]}) cai1 = df["a"].__cuda_array_interface__ df[["a"]] cai2 = df["a"].__cuda_array_interface__ assert cai1 == cai2
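# A hedged sketch of the zero-copy interop that __cuda_array_interface__
# enables for non-null numeric columns (the tests above check this
# handshake in both directions).
import cupy

import cudf

s = cudf.Series([1.0, 2.0, 3.0])
arr = cupy.asarray(s)  # wraps the column's device memory, no host copy
arr *= 2               # in-place update of the shared device buffer
print(s)               # the Series typically sees the change, since no
                       # copy was made when CuPy consumed the interface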
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_options.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION.

from contextlib import redirect_stdout
from io import StringIO

import pytest

import cudf


@pytest.fixture(scope="class", autouse=False)
def empty_option_environment():
    old_option_environment = cudf.options._OPTIONS
    cudf.options._OPTIONS = {}
    yield
    cudf.options._OPTIONS = old_option_environment


@pytest.fixture(scope="function")
def odd_option(empty_option_environment):
    def validator(x):
        if not x % 2 == 1:
            raise ValueError(f"Invalid option value {x}")

    cudf.options._register_option(
        "odd_option",
        1,
        "An odd option.",
        validator,
    )
    yield
    del cudf.options._OPTIONS["odd_option"]


@pytest.fixture(scope="function")
def even_option(empty_option_environment):
    def validator(x):
        if not x % 2 == 0:
            raise ValueError(f"Invalid option value {x}")

    cudf.options._register_option(
        "even_option", 0, "An even option.", validator
    )
    yield
    del cudf.options._OPTIONS["even_option"]


@pytest.mark.usefixtures("odd_option", "even_option")
class TestCleanOptions:
    def test_option_get_set(odd_option):
        assert cudf.get_option("odd_option") == 1

        cudf.set_option("odd_option", 101)
        assert cudf.get_option("odd_option") == 101

    def test_option_set_invalid(odd_option):
        with pytest.raises(ValueError, match="Invalid option value 0"):
            cudf.set_option("odd_option", 0)

    def test_option_description(odd_option):
        s = StringIO()
        with redirect_stdout(s):
            cudf.describe_option("odd_option")
        s.seek(0)
        expected = (
            "odd_option:\n\tAn odd option.\n\t[Default: 1] [Current: 1]\n"
        )
        assert expected == s.read()

    def test_option_description_all(odd_option, even_option):
        s = StringIO()
        with redirect_stdout(s):
            cudf.describe_option()
        s.seek(0)
        expected = (
            "odd_option:\n\tAn odd option.\n\t[Default: 1] [Current: 1]\n"
            "even_option:\n\tAn even option.\n\t[Default: 0] [Current: 0]\n"
        )
        assert expected == s.read()


@pytest.mark.parametrize("default_integer_bitwidth", [32, 64, None])
def test_empty_option_context(default_integer_bitwidth):
    prev_setting = cudf.get_option("default_integer_bitwidth")
    cudf.set_option("default_integer_bitwidth", default_integer_bitwidth)

    with cudf.option_context():
        assert (
            cudf.get_option("default_integer_bitwidth")
            == default_integer_bitwidth
        )

    assert (
        cudf.get_option("default_integer_bitwidth")
        == default_integer_bitwidth
    )
    cudf.set_option("default_integer_bitwidth", prev_setting)


@pytest.mark.parametrize("pandas_compatible", [True, False])
@pytest.mark.parametrize("default_integer_bitwidth", [32, 64])
def test_option_context(pandas_compatible, default_integer_bitwidth):
    prev_pandas_compatible_setting = cudf.get_option("mode.pandas_compatible")
    prev_width_setting = cudf.get_option("default_integer_bitwidth")

    with cudf.option_context(
        "mode.pandas_compatible",
        pandas_compatible,
        "default_integer_bitwidth",
        default_integer_bitwidth,
    ):
        assert cudf.get_option("mode.pandas_compatible") is pandas_compatible
        assert (
            cudf.get_option("default_integer_bitwidth")
            is default_integer_bitwidth
        )

    assert (
        cudf.get_option("mode.pandas_compatible")
        is prev_pandas_compatible_setting
    )
    assert cudf.get_option("default_integer_bitwidth") is prev_width_setting


def test_options_context_error():
    with pytest.raises(ValueError):
        with cudf.option_context("mode.pandas_compatible"):
            pass

    with pytest.raises(ValueError):
        with cudf.option_context("mode.pandas_compatible", 1, 2):
            pass
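# A hedged sketch of the public options API the fixtures and tests above
# exercise.
import cudf

prev = cudf.get_option("default_integer_bitwidth")
with cudf.option_context("default_integer_bitwidth", 32):
    assert cudf.get_option("default_integer_bitwidth") == 32
assert cudf.get_option("default_integer_bitwidth") == prev  # restored on exit
cudf.describe_option("default_integer_bitwidth")  # prints default/current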
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_resampling.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION.

import numpy as np
import pandas as pd
import pytest

import cudf
from cudf.testing._utils import assert_eq


def assert_resample_results_equal(lhs, rhs, **kwargs):
    assert_eq(
        lhs.sort_index(),
        rhs.sort_index(),
        check_dtype=False,
        check_freq=False,
        **kwargs,
    )


@pytest.mark.parametrize("ts_resolution", ["ns", "s", "ms"])
def test_series_downsample_simple(ts_resolution):
    # Series with an index of 1min intervals:
    index = pd.date_range(start="2001-01-01", periods=10, freq="1T")
    psr = pd.Series(range(10), index=index)
    gsr = cudf.from_pandas(psr)
    gsr.index = gsr.index.astype(f"datetime64[{ts_resolution}]")
    assert_resample_results_equal(
        psr.resample("3T").sum(),
        gsr.resample("3T").sum(),
    )


def test_series_upsample_simple():
    # Series with an index of 1min intervals:
    index = pd.date_range(start="2001-01-01", periods=10, freq="1T")
    psr = pd.Series(range(10), index=index)
    gsr = cudf.from_pandas(psr)
    assert_resample_results_equal(
        psr.resample("3T").sum(),
        gsr.resample("3T").sum(),
    )


@pytest.mark.parametrize("rule", ["2S", "10S"])
def test_series_resample_ffill(rule):
    rng = pd.date_range("1/1/2012", periods=10, freq="5S")
    ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng)
    gts = cudf.from_pandas(ts)
    assert_resample_results_equal(
        ts.resample(rule).ffill(), gts.resample(rule).ffill()
    )


@pytest.mark.parametrize("rule", ["2S", "10S"])
def test_series_resample_bfill(rule):
    rng = pd.date_range("1/1/2012", periods=10, freq="5S")
    ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng)
    gts = cudf.from_pandas(ts)
    assert_resample_results_equal(
        ts.resample(rule).bfill(), gts.resample(rule).bfill()
    )


@pytest.mark.parametrize("rule", ["2S", "10S"])
def test_series_resample_asfreq(rule):
    rng = pd.date_range("1/1/2012", periods=100, freq="5S")
    ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng)
    gts = cudf.from_pandas(ts)
    assert_resample_results_equal(
        ts.resample(rule).asfreq(), gts.resample(rule).asfreq()
    )


def test_dataframe_resample_aggregation_simple():
    pdf = pd.DataFrame(
        np.random.randn(1000, 3),
        index=pd.date_range("1/1/2012", freq="S", periods=1000),
        columns=["A", "B", "C"],
    )
    gdf = cudf.from_pandas(pdf)
    assert_resample_results_equal(
        pdf.resample("3T").mean(), gdf.resample("3T").mean()
    )


def test_dataframe_resample_multiagg():
    pdf = pd.DataFrame(
        np.random.randn(1000, 3),
        index=pd.date_range("1/1/2012", freq="S", periods=1000),
        columns=["A", "B", "C"],
    )
    gdf = cudf.from_pandas(pdf)
    assert_resample_results_equal(
        pdf.resample("3T").agg(["sum", "mean", "std"]),
        gdf.resample("3T").agg(["sum", "mean", "std"]),
    )


def test_dataframe_resample_on():
    # test resampling on a specified column
    pdf = pd.DataFrame(
        {
            "x": np.random.randn(1000),
            "y": pd.date_range("1/1/2012", freq="S", periods=1000),
        }
    )
    gdf = cudf.from_pandas(pdf)
    assert_resample_results_equal(
        pdf.resample("3T", on="y").mean(), gdf.resample("3T", on="y").mean()
    )


def test_dataframe_resample_level():
    # test resampling on a specific level of a MultiIndex
    pdf = pd.DataFrame(
        {
            "x": np.random.randn(1000),
            "y": pd.date_range("1/1/2012", freq="S", periods=1000),
        }
    )
    pdi = pd.MultiIndex.from_frame(pdf)
    pdf = pd.DataFrame({"a": np.random.randn(1000)}, index=pdi)
    gdf = cudf.from_pandas(pdf)
    assert_resample_results_equal(
        pdf.resample("3T", level="y").mean(),
        gdf.resample("3T", level="y").mean(),
    )


@pytest.mark.parametrize(
    "in_freq, sampling_freq, out_freq",
    [
        ("1ns", "1us", "us"),
        ("1us", "10us", "us"),
        ("ms", "100us", "us"),
        ("ms", "1s", "s"),
        ("s", "1T", "s"),
        ("1T", "30s", "s"),
        ("1D", "10D", "s"),
        ("10D", "1D", "s"),
    ],
)
def test_resampling_frequency_conversion(in_freq, sampling_freq, out_freq):
    # test that we cast to the appropriate frequency
    # when resampling:
    pdf = pd.DataFrame(
        {
            "x": np.random.randn(100),
            "y": pd.date_range("1/1/2012", freq=in_freq, periods=100),
        }
    )
    gdf = cudf.from_pandas(pdf)
    expect = pdf.resample(sampling_freq, on="y").mean()
    got = gdf.resample(sampling_freq, on="y").mean()
    assert_resample_results_equal(expect, got)

    assert got.index.dtype == np.dtype(f"datetime64[{out_freq}]")
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_transform.py
# Copyright (c) 2018-2022, NVIDIA CORPORATION.

import numpy as np
import pytest

from cudf import Series
from cudf.testing._utils import NUMERIC_TYPES

supported_types = NUMERIC_TYPES


def _generic_function(a):
    return a**3


@pytest.mark.parametrize("dtype", supported_types)
@pytest.mark.parametrize(
    "udf,testfunc",
    [
        (_generic_function, lambda ser: ser**3),
        (lambda x: x in [1, 2, 3, 4], lambda ser: np.isin(ser, [1, 2, 3, 4])),
    ],
)
def test_apply_python_lambda(dtype, udf, testfunc):
    size = 500
    lhs_arr = np.random.random(size).astype(dtype)
    lhs_ser = Series(lhs_arr)

    out_ser = lhs_ser.apply(udf)
    result = testfunc(lhs_arr)
    np.testing.assert_almost_equal(result, out_ser.to_numpy())
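# A hedged sketch of the Series.apply path tested above; the Python
# lambda is compiled by Numba and run element-wise on the GPU.
import cudf

s = cudf.Series([1.0, 2.0, 3.0])
print(s.apply(lambda x: x**3).to_numpy())  # [ 1.  8. 27.]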
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_testing.py
# Copyright (c) 2020-2022, NVIDIA CORPORATION. import numpy as np import pandas as pd import pyarrow as pa import pytest import cudf from cudf.core.column.column import as_column, full from cudf.testing import ( assert_frame_equal, assert_index_equal, assert_series_equal, ) from cudf.testing._utils import ( NUMERIC_TYPES, OTHER_TYPES, assert_column_memory_eq, assert_column_memory_ne, assert_eq, ) from cudf.testing.testing import assert_column_equal @pytest.fixture( params=[ pa.array([*range(10)]), pa.array(["hello", "world", "rapids", "AI"]), pa.array([[1, 2, 3], [4, 5], [6], [], [7]]), pa.array([{"f0": "hello", "f1": 42}, {"f0": "world", "f1": 3}]), ] ) def arrow_arrays(request): return request.param @pytest.mark.parametrize("rdata", [[1, 2, 5], [1, 2, 6], [1, 2, 5, 6]]) @pytest.mark.parametrize("exact", ["equiv", True, False]) @pytest.mark.parametrize("check_names", [True, False]) @pytest.mark.parametrize("rname", ["a", "b"]) @pytest.mark.parametrize("check_categorical", [True, False]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + OTHER_TYPES + ["datetime64[ns]"] ) def test_basic_assert_index_equal( rdata, exact, check_names, rname, check_categorical, dtype, ): p_left = pd.Index([1, 2, 3], name="a", dtype=dtype) p_right = pd.Index(rdata, name=rname, dtype=dtype) left = cudf.from_pandas(p_left) right = cudf.from_pandas(p_right) kind = None try: pd.testing.assert_index_equal( p_left, p_right, exact=exact, check_names=check_names, check_categorical=check_categorical, ) except BaseException as e: kind = type(e) msg = str(e) if kind is not None: if (kind == TypeError) and ( msg == ( "Categoricals can only be compared " "if 'categories' are the same." ) ): kind = AssertionError with pytest.raises(kind): assert_index_equal( left, right, exact=exact, check_names=check_names, check_categorical=check_categorical, ) else: assert_index_equal( left, right, exact=exact, check_names=check_names, check_categorical=check_categorical, ) @pytest.mark.parametrize("rdata", [[1, 2, 5], [1, 2, 6], [1, 2, 5, 6]]) @pytest.mark.parametrize("check_names", [True, False]) @pytest.mark.parametrize("rname", ["a", "b"]) @pytest.mark.parametrize("check_category_order", [True, False]) @pytest.mark.parametrize("check_categorical", [True, False]) @pytest.mark.parametrize( "dtype", NUMERIC_TYPES + OTHER_TYPES + ["datetime64[ns]"] ) def test_basic_assert_series_equal( rdata, rname, check_names, check_category_order, check_categorical, dtype, ): p_left = pd.Series([1, 2, 3], name="a", dtype=dtype) p_right = pd.Series(rdata, name=rname, dtype=dtype) left = cudf.from_pandas(p_left) right = cudf.from_pandas(p_right) kind = None try: pd.testing.assert_series_equal( p_left, p_right, check_names=check_names, check_categorical=check_categorical, check_category_order=check_category_order, ) except BaseException as e: kind = type(e) if kind is not None: with pytest.raises(kind): assert_series_equal( left, right, check_names=check_names, check_categorical=check_categorical, check_category_order=check_category_order, ) else: assert_series_equal( left, right, check_names=check_names, check_categorical=check_categorical, check_category_order=check_category_order, ) @pytest.mark.parametrize( "other", [ as_column(["1", "2", "3"]), as_column([[1], [2], [3]]), as_column([{"a": 1}, {"a": 2}, {"a": 3}]), ], ) def test_assert_column_equal_dtype_edge_cases(other): # string series should be 100% different # even when the elements are the same base = as_column([1, 2, 3]) # for these dtypes, the diff should always be 100% regardless of the 
values with pytest.raises( AssertionError, match=r".*values are different \(100.0 %\).*" ): assert_column_equal(base, other, check_dtype=False) # the exceptions are the empty and all null cases assert_column_equal(base.slice(0, 0), other.slice(0, 0), check_dtype=False) assert_column_equal(other.slice(0, 0), base.slice(0, 0), check_dtype=False) base = full(len(base), fill_value=cudf.NA, dtype=base.dtype) other = full(len(other), fill_value=cudf.NA, dtype=other.dtype) assert_column_equal(base, other, check_dtype=False) assert_column_equal(other, base, check_dtype=False) @pytest.mark.parametrize( "rdtype", [["int8", "int16", "int64"], ["int64", "int16", "int8"]] ) @pytest.mark.parametrize("rname", [["a", "b", "c"], ["b", "c", "a"]]) @pytest.mark.parametrize("index", [[1, 2, 3], [3, 2, 1]]) @pytest.mark.parametrize("check_exact", [True, False]) @pytest.mark.parametrize("check_dtype", [True, False]) @pytest.mark.parametrize("check_names", [True, False]) @pytest.mark.parametrize("check_like", [True, False]) @pytest.mark.parametrize("mismatch", [True, False]) def test_basic_assert_frame_equal( rdtype, rname, index, check_exact, check_dtype, check_names, check_like, mismatch, ): data = [1, 2, 1] p_left = pd.DataFrame(index=[1, 2, 3]) p_left["a"] = np.array(data, dtype="int8") p_left["b"] = np.array(data, dtype="int16") if mismatch: p_left["c"] = np.array([1, 2, 3], dtype="int64") else: p_left["c"] = np.array(data, dtype="int64") p_right = pd.DataFrame(index=index) for dtype, name in zip(rdtype, rname): p_right[name] = np.array(data, dtype=dtype) left = cudf.from_pandas(p_left) right = cudf.from_pandas(p_right) kind = None try: pd.testing.assert_frame_equal( p_left, p_right, check_exact=check_exact, check_dtype=check_dtype, check_names=check_names, check_like=check_like, ) except BaseException as e: kind = type(e) if kind is not None: with pytest.raises(kind): assert_frame_equal( left, right, check_exact=check_exact, check_dtype=check_dtype, check_names=check_names, check_like=check_like, ) else: assert_frame_equal( left, right, check_exact=check_exact, check_dtype=check_dtype, check_names=check_names, check_like=check_like, ) @pytest.mark.parametrize("rdata", [[0, 1, 2, 3], [0, 1, 2, 4]]) @pytest.mark.parametrize("check_datetimelike_compat", [True, False]) def test_datetime_like_compaibility(rdata, check_datetimelike_compat): psr1 = pd.Series([0, 1, 2, 3], dtype="datetime64[ns]") psr2 = pd.Series(rdata, dtype="datetime64[ns]").astype("str") sr1 = cudf.from_pandas(psr1) sr2 = cudf.from_pandas(psr2) kind = None try: pd.testing.assert_series_equal( psr1, psr2, check_datetimelike_compat=check_datetimelike_compat ) except BaseException as e: kind = type(e) if kind is not None: with pytest.raises(kind): assert_series_equal( sr1, sr2, check_datetimelike_compat=check_datetimelike_compat ) else: assert_series_equal( sr1, sr2, check_datetimelike_compat=check_datetimelike_compat ) @pytest.mark.parametrize( "rdata", [ [[0, 1, 2, 3], ["G", "O", "N", "E"]], [[0, 1, 2, 4], ["G", "O", "N", "E"]], ], ) def test_multiindex_equal(rdata): pidx1 = pd.MultiIndex.from_arrays( [[0, 1, 2, 3], ["G", "O", "N", "E"]], names=("n", "id") ) pidx2 = pd.MultiIndex.from_arrays(rdata, names=("n", "id")) idx1 = cudf.from_pandas(pidx1) idx2 = cudf.from_pandas(pidx2) kind = None try: pd.testing.assert_index_equal(pidx1, pidx2) except BaseException as e: kind = type(e) if kind is not None: with pytest.raises(kind): assert_index_equal(idx1, idx2) else: assert_index_equal(idx1, idx2) @pytest.mark.parametrize("dtype", ["int8", "uint8", 
"float32"]) @pytest.mark.parametrize("check_exact", [True, False]) @pytest.mark.parametrize("check_dtype", [True, False]) def test_series_different_type_cases(dtype, check_exact, check_dtype): data = [0, 1, 2, 3] psr1 = pd.Series(data, dtype="uint8") psr2 = pd.Series(data, dtype=dtype) sr1 = cudf.from_pandas(psr1) sr2 = cudf.from_pandas(psr2) kind = None try: pd.testing.assert_series_equal( psr1, psr2, check_exact=check_exact, check_dtype=check_dtype ) except BaseException as e: kind = type(e) if kind is not None: with pytest.raises(kind): assert_series_equal( sr1, sr2, check_exact=check_exact, check_dtype=check_dtype ) else: assert_series_equal( sr1, sr2, check_exact=check_exact, check_dtype=check_dtype ) @pytest.mark.parametrize( "dtype", ["int8", "int16", "int32", "int64"], ) @pytest.mark.parametrize("exact", ["equiv", True, False]) def test_range_index_and_int_index_eqaulity(dtype, exact): pidx1 = pd.RangeIndex(0, stop=5, step=1) pidx2 = pd.Index([0, 1, 2, 3, 4]) idx1 = cudf.from_pandas(pidx1) idx2 = cudf.Index([0, 1, 2, 3, 4], dtype=dtype) kind = None try: pd.testing.assert_index_equal(pidx1, pidx2, exact=exact) except BaseException as e: kind = type(e) if kind is not None: with pytest.raises(kind): assert_index_equal(idx1, idx2, exact=exact) else: assert_index_equal(idx1, idx2, exact=exact) @pytest.mark.parametrize( "left, right", [ (1493282, 1493282), (1493282.0, 1493282.0 + 1e-8), ("abc", "abc"), (0, np.array(0)), ( np.datetime64(123456, "ns"), pd.Timestamp(np.datetime64(123456, "ns")), ), ("int64", np.dtype("int64")), (np.nan, np.nan), ], ) def test_basic_scalar_equality(left, right): assert_eq(left, right) @pytest.mark.parametrize( "left, right", [ (1493282, 1493274), (1493282.0, 1493282.0 + 1e-6), ("abc", "abd"), (0, np.array(1)), ( np.datetime64(123456, "ns"), pd.Timestamp(np.datetime64(123457, "ns")), ), ("int64", np.dtype("int32")), ], ) def test_basic_scalar_inequality(left, right): with pytest.raises(AssertionError, match=r".*not (almost )?equal.*"): assert_eq(left, right) def test_assert_column_memory_basic(arrow_arrays): left = cudf.core.column.ColumnBase.from_arrow(arrow_arrays) right = cudf.core.column.ColumnBase.from_arrow(arrow_arrays) with pytest.raises(AssertionError): assert_column_memory_eq(left, right) assert_column_memory_ne(left, right) def test_assert_column_memory_slice(arrow_arrays): col = cudf.core.column.ColumnBase.from_arrow(arrow_arrays) left = col.slice(0, 1) right = col.slice(1, 2) with pytest.raises(AssertionError): assert_column_memory_eq(left, right) assert_column_memory_ne(left, right) with pytest.raises(AssertionError): assert_column_memory_eq(left, col) assert_column_memory_ne(left, col) with pytest.raises(AssertionError): assert_column_memory_eq(right, col) assert_column_memory_ne(right, col) def test_assert_column_memory_basic_same(arrow_arrays): data = cudf.core.column.ColumnBase.from_arrow(arrow_arrays) buf = cudf.core.buffer.as_buffer(data.base_data) left = cudf.core.column.build_column(buf, dtype=np.int32) right = cudf.core.column.build_column(buf, dtype=np.int32) assert_column_memory_eq(left, right) with pytest.raises(AssertionError): assert_column_memory_ne(left, right)
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/pytest.ini
# Copyright (c) 2022, NVIDIA CORPORATION.

[pytest]
markers =
    spilling: mark a test as a good candidate to run with `CUDF_SPILL=ON`
xfail_strict = true
filterwarnings =
    error
    ignore:::.*xdist.*
    ignore:::.*pytest.*
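# A hedged sketch of opting a test into the `spilling` marker registered
# in the pytest.ini above; the test name and body are hypothetical.
import pytest


@pytest.mark.spilling
def test_candidate_for_spilling_run():
    # flagged as worth re-running with the CUDF_SPILL=ON environment variable
    assert True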
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_monotonic.py
# Copyright (c) 2019-2023, NVIDIA CORPORATION. """ Tests related to is_unique and is_monotonic attributes """ import numpy as np import pandas as pd import pytest import cudf from cudf import Index, MultiIndex, Series from cudf.core.index import ( CategoricalIndex, DatetimeIndex, GenericIndex, RangeIndex, ) from cudf.testing._utils import assert_eq, expect_warning_if @pytest.mark.parametrize("testrange", [(10, 20, 1), (0, -10, -1), (5, 5, 1)]) def test_range_index(testrange): index = RangeIndex( start=testrange[0], stop=testrange[1], step=testrange[2] ) index_pd = pd.RangeIndex( start=testrange[0], stop=testrange[1], step=testrange[2] ) assert index.is_unique == index_pd.is_unique with pytest.warns(FutureWarning): expect = index_pd.is_monotonic with pytest.warns(FutureWarning): got = index.is_monotonic assert got == expect assert index.is_monotonic_increasing == index_pd.is_monotonic_increasing assert index.is_monotonic_decreasing == index_pd.is_monotonic_decreasing @pytest.mark.parametrize( "testlist", [ [1, 2, 3, 4], [1, 2, 3, 3, 4], [10, 9, 8, 7], [10, 9, 8, 8, 7], ["c", "d", "e", "f"], ["c", "d", "e", "e", "f"], ["z", "y", "x", "r"], ["z", "y", "x", "x", "r"], ], ) def test_generic_index(testlist): index = GenericIndex(testlist) index_pd = pd.Index(testlist) assert index.is_unique == index_pd.is_unique with pytest.warns(FutureWarning): expect = index_pd.is_monotonic with pytest.warns(FutureWarning): got = index.is_monotonic assert got == expect assert index.is_monotonic_increasing == index_pd.is_monotonic_increasing assert index.is_monotonic_decreasing == index_pd.is_monotonic_decreasing @pytest.mark.parametrize( "testlist", [ ["c", "d", "e", "f"], ["c", "d", "e", "e", "f"], ["z", "y", "x", "r"], ["z", "y", "x", "x", "r"], ], ) def test_string_index(testlist): index = cudf.Index(testlist) index_pd = pd.Index(testlist) assert index.is_unique == index_pd.is_unique with pytest.warns(FutureWarning): expect = index_pd.is_monotonic with pytest.warns(FutureWarning): got = index.is_monotonic assert got == expect assert index.is_monotonic_increasing == index_pd.is_monotonic_increasing assert index.is_monotonic_decreasing == index_pd.is_monotonic_decreasing @pytest.mark.parametrize( "testlist", [["c", "d", "e", "f"], ["z", "y", "x", "r"]] ) def test_categorical_index(testlist): # Assuming unordered categorical data cannot be "monotonic" raw_cat = pd.Categorical(testlist, ordered=True) index = CategoricalIndex(raw_cat) index_pd = pd.CategoricalIndex(raw_cat) assert index.is_unique == index_pd.is_unique with pytest.warns(FutureWarning): expect = index_pd.is_monotonic with pytest.warns(FutureWarning): got = index.is_monotonic assert got == expect assert index.is_monotonic_increasing == index_pd.is_monotonic_increasing assert index.is_monotonic_decreasing == index_pd.is_monotonic_decreasing @pytest.mark.parametrize( "testlist", [ [ "2001-01-01 00:00:00", "2001-02-03 08:00:00", "2001-03-08 16:00:00", "2001-04-11 00:00:00", ], [ "2001-04-11 00:00:00", "2001-03-08 16:00:00", "2001-02-03 08:00:00", "2001-01-01 00:00:00", ], [ "2001-04-11 00:00:00", "2001-02-03 08:00:00", "2001-03-08 16:00:00", "2001-01-01 00:00:00", ], [ "2001-04-11 00:00:00", "2001-01-01 00:00:00", "2001-02-03 08:00:00", "2001-03-08 16:00:00", "2001-01-01 00:00:00", ], ], ) def test_datetime_index(testlist): index = DatetimeIndex(testlist) index_pd = pd.DatetimeIndex(testlist) assert index.is_unique == index_pd.is_unique with pytest.warns(FutureWarning): expect = index_pd.is_monotonic with pytest.warns(FutureWarning): got = 
index.is_monotonic assert got == expect assert index.is_monotonic_increasing == index_pd.is_monotonic_increasing assert index.is_monotonic_decreasing == index_pd.is_monotonic_decreasing @pytest.mark.parametrize( "testlist", [ [1, 2, 3, 4], [1, 2, 3, 3, 4], [10, 9, 8, 7], [10, 9, 8, 8, 7], ["c", "d", "e", "f"], ["c", "d", "e", "e", "f"], ["z", "y", "x", "r"], ["z", "y", "x", "x", "r"], ], ) def test_series(testlist): series = Series(testlist) series_pd = pd.Series(testlist) assert series.is_unique == series_pd.is_unique with pytest.warns(FutureWarning): expect = series_pd.index.is_monotonic with pytest.warns(FutureWarning): got = series.index.is_monotonic assert got == expect assert series.is_monotonic_increasing == series_pd.is_monotonic_increasing assert series.is_monotonic_decreasing == series_pd.is_monotonic_decreasing def test_multiindex(): pdf = pd.DataFrame(np.random.rand(7, 5)) pdf.index = pd.MultiIndex( [ ["a", "b", "c"], ["house", "store", "forest"], ["clouds", "clear", "storm"], ["fire", "smoke", "clear"], ], [ [0, 0, 0, 0, 1, 1, 2], [1, 1, 1, 1, 0, 0, 2], [0, 0, 2, 2, 2, 0, 1], [0, 0, 0, 1, 2, 0, 1], ], ) pdf.index.names = ["alpha", "location", "weather", "sign"] gdf = cudf.from_pandas(pdf) assert pdf.index.is_unique == gdf.index.is_unique with pytest.warns(FutureWarning): expect = pdf.index.is_monotonic with pytest.warns(FutureWarning): got = gdf.index.is_monotonic assert got == expect assert ( pdf.index.is_monotonic_increasing == gdf.index.is_monotonic_increasing ) assert ( pdf.index.is_monotonic_decreasing == gdf.index.is_monotonic_decreasing ) @pytest.mark.parametrize( "testarr", [ ( [ ["bar", "bar", "foo", "foo", "qux", "qux", "qux"], ["one", "two", "one", "two", "one", "two", "two"], ], ["first", "second"], ), ( [ ["bar", "bar", "foo", "foo", "qux", "qux"], ["one", "two", "one", "two", "one", "two"], ], ["first", "second"], ), ], ) def test_multiindex_tuples(testarr): tuples = list(zip(*testarr[0])) index = MultiIndex.from_tuples(tuples, names=testarr[1]) index_pd = pd.MultiIndex.from_tuples(tuples, names=testarr[1]) assert index.is_unique == index_pd.is_unique with pytest.warns(FutureWarning): expect = index_pd.is_monotonic with pytest.warns(FutureWarning): got = index.is_monotonic assert got == expect assert index.is_monotonic_increasing == index_pd.is_monotonic_increasing assert index.is_monotonic_decreasing == index_pd.is_monotonic_decreasing @pytest.mark.parametrize( "testlist", [ [10, 9, 8, 8, 7], [2.0, 5.0, 4.0, 3.0, 7.0], ["b", "d", "e", "a", "c"], ["frog", "cat", "bat", "dog"], ], ) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("kind", ["loc", "getitem", None]) def test_get_slice_bound(testlist, side, kind): index = GenericIndex(testlist) index_pd = pd.Index(testlist) for label in testlist: with pytest.warns(FutureWarning): expect = index_pd.get_slice_bound(label, side, kind) with expect_warning_if(kind is not None, FutureWarning): got = index.get_slice_bound(label, side, kind) assert got == expect @pytest.mark.parametrize("bounds", [(0, 10), (0, 1), (3, 4), (0, 0), (3, 3)]) @pytest.mark.parametrize( "indices", [[-1, 0, 5, 10, 11], [-1, 0, 1, 2], [2, 3, 4, 5], [-1, 0, 1], [2, 3, 4]], ) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("kind", ["getitem", "loc"]) def test_rangeindex_get_slice_bound_basic(bounds, indices, side, kind): start, stop = bounds pd_index = pd.RangeIndex(start, stop) cudf_index = RangeIndex(start, stop) for idx in indices: with pytest.warns(FutureWarning): expect = 
pd_index.get_slice_bound(idx, side, kind) with expect_warning_if(kind is not None, FutureWarning): got = cudf_index.get_slice_bound(idx, side, kind) assert expect == got @pytest.mark.parametrize( "bounds", [(3, 20, 5), (20, 3, -5), (20, 3, 5), (3, 20, -5), (0, 0, 2), (3, 3, 2)], ) @pytest.mark.parametrize( "label", [3, 8, 13, 18, 20, 15, 10, 5, -1, 0, 19, 21, 6, 11, 17], ) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("kind", ["getitem", "loc"]) def test_rangeindex_get_slice_bound_step(bounds, label, side, kind): start, stop, step = bounds pd_index = pd.RangeIndex(start, stop, step) cudf_index = RangeIndex(start, stop, step) with pytest.warns(FutureWarning): expect = pd_index.get_slice_bound(label, side, kind) with expect_warning_if(kind is not None, FutureWarning): got = cudf_index.get_slice_bound(label, side, kind) assert expect == got @pytest.mark.parametrize("label", [1, 3, 5, 7, 9, 11]) @pytest.mark.parametrize("side", ["left", "right"]) @pytest.mark.parametrize("kind", ["loc", "getitem", None]) def test_get_slice_bound_missing(label, side, kind): mylist = [2, 4, 6, 8, 10] index = GenericIndex(mylist) index_pd = pd.Index(mylist) with pytest.warns(FutureWarning): expect = index_pd.get_slice_bound(label, side, kind) with expect_warning_if(kind is not None, FutureWarning): got = index.get_slice_bound(label, side, kind) assert got == expect @pytest.mark.parametrize("label", ["a", "c", "e", "g"]) @pytest.mark.parametrize("side", ["left", "right"]) def test_get_slice_bound_missing_str(label, side): mylist = ["b", "d", "f"] index = GenericIndex(mylist) index_pd = pd.Index(mylist) with pytest.warns(FutureWarning): got = index.get_slice_bound(label, side, "getitem") with pytest.warns(FutureWarning): expect = index_pd.get_slice_bound(label, side, "getitem") assert got == expect testdata = [ ( Series(["2018-01-01", "2019-01-31", None], dtype="datetime64[ms]"), False, ), (Series([1, 2, 3, None]), False), (Series([None, 1, 2, 3]), False), (Series(["a", "b", "c", None]), False), (Series([None, "a", "b", "c"]), False), ] @pytest.mark.parametrize("data, expected", testdata) def test_is_monotonic_always_falls_for_null(data, expected): assert_eq(expected, data.is_monotonic_increasing) assert_eq(expected, data.is_monotonic_decreasing) @pytest.mark.parametrize("box", [Series, Index]) @pytest.mark.parametrize( "value,na_like", [ [1, None], [np.datetime64("2020-01-01", "ns"), np.datetime64("nat", "ns")], ["s", None], [1.0, np.nan], ], ids=repr, ) def test_is_unique(box, value, na_like): obj = box([value], nan_as_null=False) assert obj.is_unique obj = box([value, value], nan_as_null=False) assert not obj.is_unique obj = box([None, value], nan_as_null=False) assert obj.is_unique obj = box([None, None, value], nan_as_null=False) assert not obj.is_unique if na_like is not None: obj = box([na_like, value], nan_as_null=False) assert obj.is_unique obj = box([na_like, na_like], nan_as_null=False) assert not obj.is_unique try: if not np.isnat(na_like): # pyarrow coerces nat to null obj = box([None, na_like, value], nan_as_null=False) assert obj.is_unique except TypeError: pass
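# A hedged sketch of the monotonicity/uniqueness properties these tests
# compare against pandas.
import cudf

idx = cudf.Index([1, 2, 2, 3])
print(idx.is_monotonic_increasing)  # True: values never decrease
print(idx.is_monotonic_decreasing)  # False
print(idx.is_unique)                # False: 2 repeats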
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_datasets.py
# Copyright (c) 2019-2022, NVIDIA CORPORATION.

import numpy as np

import cudf as gd
from cudf.testing._utils import assert_eq


def test_dataset_timeseries():
    gdf1 = gd.datasets.timeseries(
        dtypes={"x": int, "y": float}, freq="120s", nulls_frequency=0.3, seed=1
    )
    gdf2 = gd.datasets.timeseries(
        dtypes={"x": int, "y": float}, freq="120s", nulls_frequency=0.3, seed=1
    )

    assert_eq(gdf1, gdf2)

    assert gdf1["x"].head().dtype == int
    assert gdf1["y"].head().dtype == float
    assert gdf1.index.name == "timestamp"

    gdf = gd.datasets.timeseries(
        "2000",
        "2010",
        freq="2H",
        dtypes={"value": float, "name": "category", "id": int},
        nulls_frequency=0.7,
        seed=1,
    )

    assert gdf["value"].head().dtype == float
    assert gdf["id"].head().dtype == int
    assert gdf["name"].head().dtype == "category"

    gdf = gd.datasets.randomdata()
    assert gdf["id"].head().dtype == int
    assert gdf["x"].head().dtype == float
    assert gdf["y"].head().dtype == float
    assert len(gdf) == 10

    gdf = gd.datasets.randomdata(
        nrows=20, dtypes={"id": int, "a": int, "b": float}
    )
    assert gdf["id"].head().dtype == int
    assert gdf["a"].head().dtype == int
    assert gdf["b"].head().dtype == float
    assert len(gdf) == 20


def test_make_bool():
    n = 10
    state = np.random.RandomState(12)
    arr = gd.datasets.make_bool(n, state)
    assert np.alltrue(np.isin(arr, [True, False]))
    assert arr.size == n
    assert arr.dtype == bool
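# A hedged sketch of the demo-data generators the test above exercises.
import cudf

gdf = cudf.datasets.timeseries(
    "2000", "2001", freq="1D", dtypes={"x": int, "y": float}, seed=1
)
print(gdf.head())  # timestamp-indexed random data
rdf = cudf.datasets.randomdata(nrows=5, dtypes={"id": int, "v": float})
assert len(rdf) == 5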
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_df_protocol.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. from typing import Any, Tuple import cupy as cp import pandas as pd import pytest import cudf from cudf.core._compat import PANDAS_GE_150 from cudf.core.buffer import as_buffer from cudf.core.column import as_column, build_column from cudf.core.df_protocol import ( DataFrameObject, _CuDFBuffer, _CuDFColumn, _DtypeKind, _MaskKind, _protocol_buffer_to_cudf_buffer, from_dataframe, protocol_dtype_to_cupy_dtype, ) from cudf.testing._utils import assert_eq @pytest.fixture( params=[ {"a": [1, 2, 3], "b": ["x", "y", "z"]}, {"a": [1, 2, None], "b": ["x", "y", "z"]}, {"a": [1, 2, 3], "b": pd.Categorical(["x", "y", None])}, ] ) def pandas_df(request): data = request.param return pd.DataFrame(data) def assert_validity_equal(protocol_buffer, cudf_buffer, size, null, valid): if null == _MaskKind.BYTEMASK: protocol_mask = _protocol_buffer_to_cudf_buffer(protocol_buffer) assert_eq( as_column(protocol_mask, dtype="bool"), as_column(cudf_buffer, dtype="bool"), ) elif null == _MaskKind.BITMASK: protocol_mask = _protocol_buffer_to_cudf_buffer(protocol_buffer) cudf_mask = cudf_buffer assert_eq( build_column( as_buffer(cp.zeros(10, dtype="int8")), "int8", size=size, mask=protocol_mask, children=(), ), build_column( as_buffer(cp.zeros(10, dtype="int8")), "int8", size=size, mask=cudf_mask, children=(), ), ) else: raise NotImplementedError() def assert_buffer_equal(buffer_and_dtype: Tuple[_CuDFBuffer, Any], cudfcol): buf, dtype = buffer_and_dtype device_id = cp.asarray(cudfcol.data).device.id assert buf.__dlpack_device__() == (2, device_id) col_from_buf = build_column( _protocol_buffer_to_cudf_buffer(buf), protocol_dtype_to_cupy_dtype(dtype), ) # check that non null values are the equals as nulls are represented # by sentinel values in the buffer. # FIXME: In gh-10202 some minimal fixes were added to unblock CI. But # currently only non-null values are compared, null positions are # unchecked. 
non_null_idxs = ~cudf.Series(cudfcol).isna() assert_eq( col_from_buf.apply_boolean_mask(non_null_idxs), cudfcol.apply_boolean_mask(non_null_idxs), ) array_from_dlpack = cp.from_dlpack(buf.__dlpack__()).get() col_array = cp.asarray(cudfcol.data_array_view(mode="read")).get() assert_eq( array_from_dlpack[non_null_idxs.to_numpy()].flatten(), col_array[non_null_idxs.to_numpy()].flatten(), ) def assert_column_equal(col: _CuDFColumn, cudfcol): assert col.size() == cudfcol.size assert col.offset == 0 assert col.null_count == cudfcol.null_count assert col.num_chunks() == 1 if col.null_count == 0: pytest.raises(RuntimeError, col._get_validity_buffer) assert col.get_buffers()["validity"] is None else: assert_validity_equal( col.get_buffers()["validity"][0], cudfcol.mask, cudfcol.size, *col.describe_null, ) if col.dtype[0] == _DtypeKind.CATEGORICAL: assert_buffer_equal(col.get_buffers()["data"], cudfcol.codes) assert col.get_buffers()["offsets"] is None elif col.dtype[0] == _DtypeKind.STRING: assert_buffer_equal(col.get_buffers()["data"], cudfcol.children[1]) assert_buffer_equal(col.get_buffers()["offsets"], cudfcol.children[0]) else: assert_buffer_equal(col.get_buffers()["data"], cudfcol) assert col.get_buffers()["offsets"] is None if col.null_count == 0: assert col.describe_null == (0, None) else: assert col.describe_null == (3, 0) def assert_dataframe_equal(dfo: DataFrameObject, df: cudf.DataFrame): assert dfo.num_columns() == len(df.columns) assert dfo.num_rows() == len(df) assert dfo.num_chunks() == 1 assert dfo.column_names() == tuple(df.columns) for col in df.columns: assert_column_equal(dfo.get_column_by_name(col), df[col]._column) def assert_from_dataframe_equals(dfobj, allow_copy): df2 = from_dataframe(dfobj, allow_copy=allow_copy) assert_dataframe_equal(dfobj.__dataframe__(allow_copy), df2) if isinstance(dfobj, cudf.DataFrame): assert_eq(dfobj, df2) elif isinstance(dfobj, pd.DataFrame): assert_eq(cudf.DataFrame(dfobj), df2) else: raise TypeError(f"{type(dfobj)} not supported yet.") def test_from_dataframe_exception(pandas_df): exception_msg = "This operation must copy data from CPU to GPU." " Set `allow_copy=True` to allow it." 
with pytest.raises(TypeError, match=exception_msg): from_dataframe(pandas_df) def assert_df_unique_dtype_cols(data): cdf = cudf.DataFrame(data=data) assert_from_dataframe_equals(cdf, allow_copy=False) assert_from_dataframe_equals(cdf, allow_copy=True) def test_from_dataframe(): data = dict(a=[1, 2, 3], b=[9, 10, 11]) df1 = cudf.DataFrame(data=data) df2 = cudf.from_dataframe(df1) assert_eq(df1, df2) df3 = cudf.from_dataframe(df2) assert_eq(df1, df3) def test_int_dtype(): data_int = dict(a=[1, 2, 3], b=[9, 10, 11]) assert_df_unique_dtype_cols(data_int) def test_float_dtype(): data_float = dict(a=[1.5, 2.5, 3.5], b=[9.2, 10.5, 11.8]) assert_df_unique_dtype_cols(data_float) def test_categorical_dtype(): cdf = cudf.DataFrame({"A": [1, 2, 5, 1]}) cdf["A"] = cdf["A"].astype("category") col = cdf.__dataframe__().get_column_by_name("A") assert col.dtype[0] == _DtypeKind.CATEGORICAL assert col.describe_categorical == (False, True, {0: 1, 1: 2, 2: 5}) assert_from_dataframe_equals(cdf, allow_copy=False) assert_from_dataframe_equals(cdf, allow_copy=True) def test_bool_dtype(): data_bool = dict(a=[True, True, False], b=[False, True, False]) assert_df_unique_dtype_cols(data_bool) def test_string_dtype(): data_string = dict(a=["a", "b", "cdef", "", "g"]) assert_df_unique_dtype_cols(data_string) def test_mixed_dtype(): data_mixed = dict( int=[1, 2, 3], float=[1.5, 2.5, 3.5], bool=[True, False, True], categorical=[5, 1, 5], string=["rapidsai-cudf ", "", "df protocol"], ) assert_df_unique_dtype_cols(data_mixed) def test_NA_int_dtype(): data_int = dict( a=[1, None, 3, None, 5], b=[9, 10, None, 7, 8], c=[6, 19, 20, 100, 1000], ) assert_df_unique_dtype_cols(data_int) def test_NA_float_dtype(): data_float = dict( a=[1.4, None, 3.6, None, 5.2], b=[9.7, 10.9, None, 7.8, 8.2], c=[6.1, 19.2, 20.3, 100.4, 1000.5], ) assert_df_unique_dtype_cols(data_float) def test_NA_categorical_dtype(): df = cudf.DataFrame({"A": [1, 2, 5, 1]}) df["B"] = df["A"].astype("category") df.at[[1, 3], "B"] = None # Set two items to null # Some detailed testing for correctness of dtype and null handling: col = df.__dataframe__().get_column_by_name("B") assert col.dtype[0] == _DtypeKind.CATEGORICAL assert col.null_count == 2 assert col.describe_null == (3, 0) assert col.num_chunks() == 1 assert col.describe_categorical == (False, True, {0: 1, 1: 2, 2: 5}) assert_from_dataframe_equals(df, allow_copy=False) assert_from_dataframe_equals(df, allow_copy=True) def test_NA_bool_dtype(): data_bool = dict(a=[None, True, False], b=[False, None, None]) assert_df_unique_dtype_cols(data_bool) def test_NA_string_dtype(): df = cudf.DataFrame({"A": ["a", "b", "cdef", "", "g"]}) df["B"] = df["A"].astype("object") df.at[1, "B"] = cudf.NA # Set one item to null # Test for correctness and null handling: col = df.__dataframe__().get_column_by_name("B") assert col.dtype[0] == _DtypeKind.STRING assert col.null_count == 1 assert col.describe_null == (3, 0) assert col.num_chunks() == 1 assert_from_dataframe_equals(df, allow_copy=False) assert_from_dataframe_equals(df, allow_copy=True) def test_NA_mixed_dtype(): data_mixed = dict( int=[1, None, 2, 3, 1000], float=[None, 1.5, 2.5, 3.5, None], bool=[True, None, False, None, None], categorical=[5, 1, 5, 3, None], string=[None, None, None, "df protocol", None], ) assert_df_unique_dtype_cols(data_mixed) @pytest.mark.skipif( not PANDAS_GE_150, reason="Pandas versions < 1.5.0 do not support interchange protocol", ) def test_from_cpu_df(pandas_df): cudf.from_dataframe(pandas_df, allow_copy=True)
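# A minimal, self-contained sketch (not part of the test suite) of the
# interchange-protocol round trip the tests above exercise. It uses only
# APIs shown in this file (`__dataframe__` consumed via
# `cudf.from_dataframe`); the helper name `_example_interchange_roundtrip`
# is ours, and the CPU -> GPU direction assumes pandas >= 1.5, per the
# skipif above.
def _example_interchange_roundtrip():
    import pandas as pd

    import cudf

    pdf = pd.DataFrame({"a": [1, 2, None], "b": ["x", "y", "z"]})
    # Host data must be copied to the device, so allow_copy=True is
    # required; without it from_dataframe raises TypeError (see
    # test_from_dataframe_exception).
    gdf = cudf.from_dataframe(pdf, allow_copy=True)
    # A cudf DataFrame exposes __dataframe__ itself, so consuming it again
    # stays on the device without a copy.
    gdf2 = cudf.from_dataframe(gdf)
    return gdf, gdf2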
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_decimal.py
# Copyright (c) 2021-2023, NVIDIA CORPORATION. import decimal from decimal import Decimal import numpy as np import pyarrow as pa import pytest from packaging import version import cudf from cudf.core.column import Decimal32Column, Decimal64Column, NumericalColumn from cudf.core.dtypes import Decimal32Dtype, Decimal64Dtype from cudf.testing._utils import ( FLOAT_TYPES, INTEGER_TYPES, SIGNED_TYPES, _decimal_series, assert_eq, expect_warning_if, ) data_ = [ [Decimal("1.1"), Decimal("2.2"), Decimal("3.3"), Decimal("4.4")], [Decimal("-1.1"), Decimal("2.2"), Decimal("3.3"), Decimal("4.4")], [1], [-1], [1, 2, 3, 4], [42, 17, 41], [1, 2, None, 4], [None, None, None], [], ] typ_ = [ pa.decimal128(precision=4, scale=2), pa.decimal128(precision=5, scale=3), pa.decimal128(precision=6, scale=4), ] @pytest.mark.parametrize("data_", data_) @pytest.mark.parametrize("typ_", typ_) def test_round_trip_decimal64_column(data_, typ_): pa_arr = pa.array(data_, type=typ_) col_64 = Decimal64Column.from_arrow(pa_arr) assert pa_arr.equals(col_64.to_arrow()) @pytest.mark.parametrize("data_", data_) @pytest.mark.parametrize("typ_", typ_) def test_round_trip_decimal32_column(data_, typ_): pa_arr = pa.array(data_, type=typ_) col_32 = Decimal32Column.from_arrow(pa_arr) assert pa_arr.equals(col_32.to_arrow()) def test_from_arrow_max_precision_decimal64(): with pytest.raises(ValueError): Decimal64Column.from_arrow( pa.array([1, 2, 3], type=pa.decimal128(scale=0, precision=19)) ) def test_from_arrow_max_precision_decimal32(): with pytest.raises(ValueError): Decimal32Column.from_arrow( pa.array([1, 2, 3], type=pa.decimal128(scale=0, precision=10)) ) @pytest.mark.parametrize( "data", [ cudf.Series( [ 14.12302, 97938.2, np.nan, 0.0, -8.302014, np.nan, 94.31304, -112.2314, 0.3333333, np.nan, ] ), ], ) @pytest.mark.parametrize("from_dtype", FLOAT_TYPES) @pytest.mark.parametrize( "to_dtype", [Decimal64Dtype(7, 2), Decimal64Dtype(11, 4), Decimal64Dtype(18, 9)], ) def test_typecast_from_float_to_decimal(request, data, from_dtype, to_dtype): request.applymarker( pytest.mark.xfail( condition=version.parse(pa.__version__) >= version.parse("13.0.0") and from_dtype == np.dtype("float32") and to_dtype.precision > 7, reason="https://github.com/rapidsai/cudf/issues/14169", ) ) got = data.astype(from_dtype) pa_arr = got.to_arrow().cast( pa.decimal128(to_dtype.precision, to_dtype.scale) ) expected = cudf.Series(Decimal64Column.from_arrow(pa_arr)) got = got.astype(to_dtype) assert_eq(got, expected) @pytest.mark.parametrize( "data", [ cudf.Series( [ 14.12302, 38.2, np.nan, 0.0, -8.302014, np.nan, 94.31304, np.nan, -112.2314, 0.3333333, np.nan, ] ), ], ) @pytest.mark.parametrize("from_dtype", INTEGER_TYPES) @pytest.mark.parametrize( "to_dtype", [Decimal64Dtype(9, 3), Decimal64Dtype(11, 4), Decimal64Dtype(18, 9)], ) def test_typecast_from_int_to_decimal(data, from_dtype, to_dtype): got = data.astype(from_dtype) pa_arr = ( got.to_arrow() .cast("float64") .cast(pa.decimal128(to_dtype.precision, to_dtype.scale)) ) expected = cudf.Series(Decimal64Column.from_arrow(pa_arr)) got = got.astype(to_dtype) assert_eq(got, expected) @pytest.mark.parametrize( "data", [ cudf.Series( [ 14.12309, 2.343942, np.nan, 0.0, -8.302082, np.nan, 94.31308, -112.2364, -8.029972, np.nan, ] ), ], ) @pytest.mark.parametrize( "from_dtype", [ Decimal64Dtype(7, 2), Decimal64Dtype(11, 4), Decimal64Dtype(18, 10), Decimal32Dtype(7, 2), Decimal32Dtype(5, 3), Decimal32Dtype(9, 5), ], ) @pytest.mark.parametrize( "to_dtype", [ Decimal64Dtype(7, 2), Decimal64Dtype(18, 10), 
Decimal64Dtype(11, 4), Decimal32Dtype(7, 2), Decimal32Dtype(9, 5), Decimal32Dtype(5, 3), ], ) def test_typecast_to_from_decimal(data, from_dtype, to_dtype): if from_dtype.scale > to_dtype.MAX_PRECISION: pytest.skip( "This is supposed to overflow because the representation value in " "the source exceeds the max representable in destination dtype." ) s = data.astype(from_dtype) pa_arr = s.to_arrow().cast( pa.decimal128(to_dtype.precision, to_dtype.scale), safe=False ) if isinstance(to_dtype, Decimal32Dtype): expected = cudf.Series(Decimal32Column.from_arrow(pa_arr)) elif isinstance(to_dtype, Decimal64Dtype): expected = cudf.Series(Decimal64Column.from_arrow(pa_arr)) with expect_warning_if(to_dtype.scale < s.dtype.scale, UserWarning): got = s.astype(to_dtype) assert_eq(got, expected) @pytest.mark.parametrize( "data", [ cudf.Series( [ 14.12309, 2.343942, np.nan, 0.0, -8.302082, np.nan, 94.31308, -112.2364, -8.029972, np.nan, ] ), ], ) @pytest.mark.parametrize( "from_dtype", [Decimal64Dtype(7, 2), Decimal64Dtype(11, 4), Decimal64Dtype(17, 10)], ) @pytest.mark.parametrize("to_dtype", SIGNED_TYPES) def test_typecast_from_decimal(data, from_dtype, to_dtype): got = data.astype(from_dtype) pa_arr = got.to_arrow().cast(to_dtype, safe=False) got = got.astype(to_dtype) expected = cudf.Series(NumericalColumn.from_arrow(pa_arr)) assert_eq(got, expected) assert_eq(got.dtype, expected.dtype) @pytest.mark.parametrize( "args", [ # scatter to a single index ( ["1", "2", "3"], Decimal64Dtype(1, 0), Decimal(5), 1, ["1", "5", "3"], ), ( ["1.5", "2.5", "3.5"], Decimal64Dtype(2, 1), Decimal("5.5"), 1, ["1.5", "5.5", "3.5"], ), ( ["1.0042", "2.0042", "3.0042"], Decimal64Dtype(5, 4), Decimal("5.0042"), 1, ["1.0042", "5.0042", "3.0042"], ), # scatter via boolmask ( ["1", "2", "3"], Decimal64Dtype(1, 0), Decimal(5), cudf.Series([True, False, True]), ["5", "2", "5"], ), ( ["1.5", "2.5", "3.5"], Decimal64Dtype(2, 1), Decimal("5.5"), cudf.Series([True, True, True]), ["5.5", "5.5", "5.5"], ), ( ["1.0042", "2.0042", "3.0042"], Decimal64Dtype(5, 4), Decimal("5.0042"), cudf.Series([False, False, True]), ["1.0042", "2.0042", "5.0042"], ), # We will allow assigning a decimal with less precision ( ["1.00", "2.00", "3.00"], Decimal64Dtype(3, 2), Decimal(5), 1, ["1.00", "5.00", "3.00"], ), # But not truncation ( ["1", "2", "3"], Decimal64Dtype(1, 0), Decimal("5.5"), 1, pa.lib.ArrowInvalid, ), # We will allow for setting scalars into decimal columns (["1", "2", "3"], Decimal64Dtype(1, 0), 5, 1, ["1", "5", "3"]), # But not if it has too many digits to fit the precision (["1", "2", "3"], Decimal64Dtype(1, 0), 50, 1, pa.lib.ArrowInvalid), ], ) def test_series_setitem_decimal(args): data, dtype, item, to, expect = args data = _decimal_series(data, dtype) if expect is pa.lib.ArrowInvalid: with pytest.raises(expect): data[to] = item return else: expect = _decimal_series(expect, dtype) data[to] = item assert_eq(data, expect) @pytest.mark.parametrize( "input_obj", [[decimal.Decimal(1), cudf.NA, decimal.Decimal(3)]] ) def test_series_construction_with_nulls(input_obj): expect = pa.array(input_obj, from_pandas=True) got = cudf.Series(input_obj).to_arrow() assert expect == got @pytest.mark.parametrize( "data", [ { "a": _decimal_series( ["1", "2", "3"], dtype=cudf.Decimal64Dtype(1, 0) ) }, { "a": _decimal_series( ["1", "2", "3"], dtype=cudf.Decimal64Dtype(1, 0) ), "b": _decimal_series( ["1.0", "2.0", "3.0"], dtype=cudf.Decimal64Dtype(2, 1) ), "c": _decimal_series( ["10.1", "20.2", "30.3"], dtype=cudf.Decimal64Dtype(3, 1) ), }, { "a": 
_decimal_series( ["1", None, "3"], dtype=cudf.Decimal64Dtype(1, 0) ), "b": _decimal_series( ["1.0", "2.0", None], dtype=cudf.Decimal64Dtype(2, 1) ), "c": _decimal_series( [None, "20.2", "30.3"], dtype=cudf.Decimal64Dtype(3, 1) ), }, ], ) def test_serialize_decimal_columns(data): df = cudf.DataFrame(data) recreated = df.__class__.deserialize(*df.serialize()) assert_eq(recreated, df) def test_decimal_invalid_precision(): with pytest.raises(pa.ArrowInvalid): _ = cudf.Series([10, 20, 30], dtype=cudf.Decimal64Dtype(2, 2)) with pytest.raises(pa.ArrowInvalid): _ = cudf.Series([Decimal("300")], dtype=cudf.Decimal64Dtype(2, 1)) def test_decimal_overflow(): s = cudf.Series([Decimal("0.0009384233522166997927180531650178250")]) result = s * s assert_eq(cudf.Decimal128Dtype(precision=38, scale=37), result.dtype) s = cudf.Series([1, 2], dtype=cudf.Decimal128Dtype(precision=38, scale=0)) result = s * Decimal("1.0") assert_eq(cudf.Decimal128Dtype(precision=38, scale=1), result.dtype)
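# A minimal sketch (not part of the test suite) of the precision/scale
# bookkeeping the tests above rely on: Decimal64Dtype(precision, scale)
# holds up to `precision` significant digits, `scale` of them after the
# decimal point, and arithmetic widens the result dtype instead of
# overflowing (see test_decimal_overflow). The helper name is ours.
def _example_decimal_widening():
    from decimal import Decimal

    import cudf

    s = cudf.Series([Decimal("1.1"), Decimal("2.2")]).astype(
        cudf.Decimal64Dtype(precision=2, scale=1)
    )
    # The product needs more digits than either input, so cudf grows the
    # result's precision and scale.
    squared = s * s
    return s.dtype, squared.dtype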
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_pandas_interop.py
# Copyright (c) 2018-2021, NVIDIA CORPORATION. import numpy as np import pandas as pd import cudf from cudf import DataFrame from cudf.testing._utils import assert_eq def test_to_pandas(): df = DataFrame() df["a"] = np.arange(5, dtype=np.int32) df["b"] = np.arange(10, 15, dtype=np.float64) df["c"] = np.array([True, False, None, True, True]) pdf = df.to_pandas() assert tuple(df.columns) == tuple(pdf.columns) assert df["a"].dtype == pdf["a"].dtype assert df["b"].dtype == pdf["b"].dtype # Notice, the dtypes differ when pandas and cudf boolean series # contain None/NaN assert df["c"].dtype == np.bool_ assert pdf["c"].dtype == np.object_ assert len(df["a"]) == len(pdf["a"]) assert len(df["b"]) == len(pdf["b"]) assert len(df["c"]) == len(pdf["c"]) def test_from_pandas(): pdf = pd.DataFrame() pdf["a"] = np.arange(10, dtype=np.int32) pdf["b"] = np.arange(10, 20, dtype=np.float64) df = DataFrame.from_pandas(pdf) assert tuple(df.columns) == tuple(pdf.columns) assert df["a"].dtype == pdf["a"].dtype assert df["b"].dtype == pdf["b"].dtype assert len(df["a"]) == len(pdf["a"]) assert len(df["b"]) == len(pdf["b"]) def test_from_pandas_ex1(): pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]}) df = DataFrame.from_pandas(pdf) assert tuple(df.columns) == tuple(pdf.columns) assert np.all(df["a"].to_numpy() == pdf["a"]) matches = df["b"].to_numpy(na_value=np.nan) == pdf["b"] # the 3rd element is False because (nan == nan) evaluates to False assert np.all(matches == [True, True, False, True]) assert np.isnan(df["b"].to_numpy(na_value=np.nan)[2]) assert np.isnan(pdf["b"][2]) def test_from_pandas_with_index(): pdf = pd.DataFrame({"a": [0, 1, 2, 3], "b": [0.1, 0.2, None, 0.3]}) pdf = pdf.set_index(np.asarray([4, 3, 2, 1])) df = DataFrame.from_pandas(pdf) # Check columns assert_eq(df.a, pdf.a) assert_eq(df.b, pdf.b) # Check index assert_eq(df.index.values, pdf.index.values) # Check again using pandas testing tool on frames assert_eq(df, pdf) def test_from_pandas_rangeindex(): idx1 = pd.RangeIndex(start=0, stop=4, step=1, name="myindex") idx2 = cudf.from_pandas(idx1) # Check index assert_eq(idx1.values, idx2.values) assert idx1.name == idx2.name def test_from_pandas_rangeindex_step(): expected = pd.RangeIndex(start=0, stop=8, step=2, name="myindex") actual = cudf.from_pandas(expected) assert_eq(expected, actual)
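# A minimal sketch (not part of the test suite) restating the bool/None
# dtype asymmetry that test_to_pandas checks: cudf keeps a null-masked
# boolean column as bool, while the pandas copy falls back to object
# dtype. The helper name is ours.
def _example_bool_null_dtypes():
    import numpy as np

    import cudf

    df = cudf.DataFrame()
    df["c"] = np.array([True, False, None, True, True])
    pdf = df.to_pandas()
    assert df["c"].dtype == np.bool_  # nulls live in the mask; dtype stays bool
    assert pdf["c"].dtype == np.object_  # pandas has no masked bool here
    return df["c"].dtype, pdf["c"].dtype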
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_joining.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION. from itertools import combinations, product, repeat import numpy as np import pandas as pd import pytest import cudf from cudf.core.dtypes import CategoricalDtype, Decimal64Dtype, Decimal128Dtype from cudf.testing._utils import ( INTEGER_TYPES, NUMERIC_TYPES, TIMEDELTA_TYPES, assert_eq, assert_exceptions_equal, expect_warning_if, ) _JOIN_TYPES = ("left", "inner", "outer", "right", "leftanti", "leftsemi") def make_params(): np.random.seed(0) hows = _JOIN_TYPES # Test specific cases (1) aa = [0, 0, 4, 5, 5] bb = [0, 0, 2, 3, 5] for how in hows: yield (aa, bb, how) # Test specific cases (2) aa = [0, 0, 1, 2, 3] bb = [0, 1, 2, 2, 3] for how in hows: yield (aa, bb, how) # Test large random integer inputs aa = np.random.randint(0, 50, 100) bb = np.random.randint(0, 50, 100) for how in hows: yield (aa, bb, how) # Test floating point inputs aa = np.random.random(50) bb = np.random.random(50) for how in hows: yield (aa, bb, how) def pd_odd_joins(left, right, join_type): if join_type == "leftanti": return left[~left.index.isin(right.index)][left.columns] elif join_type == "leftsemi": return left[left.index.isin(right.index)][left.columns] def assert_join_results_equal(expect, got, how, **kwargs): if how not in _JOIN_TYPES: raise ValueError(f"Unrecognized join type {how}") if how == "right": got = got[expect.columns] if isinstance(expect, (pd.Series, cudf.Series)): return assert_eq( expect.sort_values().reset_index(drop=True), got.sort_values().reset_index(drop=True), **kwargs, ) elif isinstance(expect, (pd.DataFrame, cudf.DataFrame)): if not len( expect.columns ): # can't sort_values() on a df without columns return assert_eq(expect, got, **kwargs) assert_eq( expect.sort_values(expect.columns.to_list()).reset_index( drop=True ), got.sort_values(got.columns.to_list()).reset_index(drop=True), **kwargs, ) elif isinstance(expect, (pd.Index, cudf.Index)): return assert_eq(expect.sort_values(), got.sort_values(), **kwargs) else: raise ValueError(f"Not a join result: {type(expect).__name__}") @pytest.mark.parametrize("aa,bb,how", make_params()) def test_dataframe_join_how(aa, bb, how): df = cudf.DataFrame() df["a"] = aa df["b"] = bb def work_pandas(df, how): df1 = df.set_index("a") df2 = df.set_index("b") if how == "leftanti": joined = pd_odd_joins(df1, df2, "leftanti") elif how == "leftsemi": joined = pd_odd_joins(df1, df2, "leftsemi") else: joined = df1.join(df2, how=how, sort=True) return joined def work_gdf(df): df1 = df.set_index("a") df2 = df.set_index("b") joined = df1.join(df2, how=how, sort=True) return joined expect = work_pandas(df.to_pandas(), how) got = work_gdf(df) expecto = expect.copy() goto = got.copy() expect = expect.astype(np.float64).fillna(np.nan)[expect.columns] got = got.astype(np.float64).fillna(np.nan)[expect.columns] assert got.index.name is None assert list(expect.columns) == list(got.columns) if how in {"left", "inner", "right", "leftanti", "leftsemi"}: assert_eq(sorted(expect.index.values), sorted(got.index.values)) if how != "outer": # An ambiguous ValueError is thrown when an index and a column # share a name, so rename the index to make the sorts work. # TODO: Is there a less hacky way?
expect.index.name = "bob" got.index.name = "mary" assert_join_results_equal(expect, got, how=how) # if(how=='right'): # _sorted_check_series(expect['a'], expect['b'], # got['a'], got['b']) # else: # _sorted_check_series(expect['b'], expect['a'], got['b'], # got['a']) else: for c in expecto.columns: _check_series(expecto[c].fillna(-1), goto[c].fillna(-1)) def _check_series(expect, got): magic = 0xDEADBEEF direct_equal = np.all(expect.values == got.to_numpy()) nanfilled_equal = np.all( expect.fillna(magic).values == got.fillna(magic).to_numpy() ) msg = "direct_equal={}, nanfilled_equal={}".format( direct_equal, nanfilled_equal ) assert direct_equal or nanfilled_equal, msg def test_dataframe_join_suffix(): np.random.seed(0) df = cudf.DataFrame() for k in "abc": df[k] = np.random.randint(0, 5, 5) left = df.set_index("a") right = df.set_index("c") with pytest.raises(ValueError) as raises: left.join(right) raises.match( "there are overlapping columns but lsuffix" " and rsuffix are not defined" ) got = left.join(right, lsuffix="_left", rsuffix="_right", sort=True) # Get expected value pddf = df.to_pandas() expect = pddf.set_index("a").join( pddf.set_index("c"), lsuffix="_left", rsuffix="_right" ) # Check assert list(expect.columns) == list(got.columns) assert_eq(expect.index.values, got.index.values) got_sorted = got.sort_values(by=list(got.columns), axis=0) expect_sorted = expect.sort_values(by=list(expect.columns), axis=0) for k in expect_sorted.columns: _check_series(expect_sorted[k].fillna(-1), got_sorted[k].fillna(-1)) def test_dataframe_join_cats(): lhs = cudf.DataFrame() lhs["a"] = pd.Categorical(list("aababcabbc"), categories=list("abc")) lhs["b"] = bb = np.arange(len(lhs)) lhs = lhs.set_index("a") rhs = cudf.DataFrame() rhs["a"] = pd.Categorical(list("abcac"), categories=list("abc")) rhs["c"] = cc = np.arange(len(rhs)) rhs = rhs.set_index("a") got = lhs.join(rhs) expect = lhs.to_pandas().join(rhs.to_pandas()) # Note: pandas makes an object Index after joining assert_join_results_equal(expect, got, how="inner") # Just do some rough checking here.
assert list(got.columns) == ["b", "c"] assert len(got) > 0 assert set(got.index.to_pandas()) & set("abc") assert set(got["b"].to_numpy()) & set(bb) assert set(got["c"].to_numpy()) & set(cc) def test_dataframe_join_combine_cats(): lhs = cudf.DataFrame({"join_index": ["a", "b", "c"], "data_x": [1, 2, 3]}) rhs = cudf.DataFrame({"join_index": ["b", "c", "d"], "data_y": [2, 3, 4]}) lhs["join_index"] = lhs["join_index"].astype("category") rhs["join_index"] = rhs["join_index"].astype("category") lhs = lhs.set_index("join_index") rhs = rhs.set_index("join_index") lhs_pd = lhs.to_pandas() rhs_pd = rhs.to_pandas() lhs_pd.index = lhs_pd.index.astype("object") rhs_pd.index = rhs_pd.index.astype("object") expect = lhs_pd.join(rhs_pd, how="outer") expect.index = expect.index.astype("category") got = lhs.join(rhs, how="outer") assert_eq(expect.index.sort_values(), got.index.sort_values()) @pytest.mark.parametrize("how", ["left", "right", "inner", "outer"]) def test_dataframe_join_mismatch_cats(how): pdf1 = pd.DataFrame( { "join_col": ["a", "b", "c", "d", "e"], "data_col_left": [10, 20, 30, 40, 50], } ) pdf2 = pd.DataFrame( {"join_col": ["c", "e", "f"], "data_col_right": [6, 7, 8]} ) pdf1["join_col"] = pdf1["join_col"].astype("category") pdf2["join_col"] = pdf2["join_col"].astype("category") gdf1 = cudf.from_pandas(pdf1) gdf2 = cudf.from_pandas(pdf2) gdf1 = gdf1.set_index("join_col") gdf2 = gdf2.set_index("join_col") pdf1 = pdf1.set_index("join_col") pdf2 = pdf2.set_index("join_col") join_gdf = gdf1.join(gdf2, how=how, sort=True) join_pdf = pdf1.join(pdf2, how=how) got = join_gdf.fillna(-1).to_pandas() expect = join_pdf.fillna(-1) # note: cudf join doesn't mask NA # We yield a categorical here whereas pandas gives Object. expect.index = expect.index.astype("category") # cudf creates the columns in different order than pandas for right join if how == "right": got = got[["data_col_left", "data_col_right"]] expect.data_col_right = expect.data_col_right.astype(np.int64) expect.data_col_left = expect.data_col_left.astype(np.int64) assert_join_results_equal(expect, got, how=how, check_categorical=False) @pytest.mark.parametrize("on", ["key1", ["key1", "key2"], None]) def test_dataframe_merge_on(on): np.random.seed(0) # Make cuDF df_left = cudf.DataFrame() nelem = 500 df_left["key1"] = np.random.randint(0, 40, nelem) df_left["key2"] = np.random.randint(0, 50, nelem) df_left["left_val"] = np.arange(nelem) df_right = cudf.DataFrame() nelem = 500 df_right["key1"] = np.random.randint(0, 30, nelem) df_right["key2"] = np.random.randint(0, 50, nelem) df_right["right_val"] = np.arange(nelem) # Make pandas DF pddf_left = df_left.to_pandas() pddf_right = df_right.to_pandas() # Expected result (from pandas) pddf_joined = pddf_left.merge(pddf_right, on=on, how="left") # Test (from cuDF; doesn't check for ordering) join_result = df_left.merge(df_right, on=on, how="left") join_result_cudf = cudf.merge(df_left, df_right, on=on, how="left") join_result["right_val"] = ( join_result["right_val"].astype(np.float64).fillna(np.nan) ) join_result_cudf["right_val"] = ( join_result_cudf["right_val"].astype(np.float64).fillna(np.nan) ) for col in list(pddf_joined.columns): if col.count("_y") > 0: join_result[col] = ( join_result[col].astype(np.float64).fillna(np.nan) ) join_result_cudf[col] = ( join_result_cudf[col].astype(np.float64).fillna(np.nan) ) # Test dataframe equality (ignore order of rows and columns) cdf_result = ( join_result.to_pandas() .sort_values(list(pddf_joined.columns)) .reset_index(drop=True) ) pdf_result = 
pddf_joined.sort_values( list(pddf_joined.columns) ).reset_index(drop=True) assert_join_results_equal(cdf_result, pdf_result, how="left") merge_func_result_cdf = ( join_result_cudf.to_pandas() .sort_values(list(pddf_joined.columns)) .reset_index(drop=True) ) assert_join_results_equal(merge_func_result_cdf, cdf_result, how="left") def test_dataframe_merge_on_unknown_column(): np.random.seed(0) # Make cuDF df_left = cudf.DataFrame() nelem = 500 df_left["key1"] = np.random.randint(0, 40, nelem) df_left["key2"] = np.random.randint(0, 50, nelem) df_left["left_val"] = np.arange(nelem) df_right = cudf.DataFrame() nelem = 500 df_right["key1"] = np.random.randint(0, 30, nelem) df_right["key2"] = np.random.randint(0, 50, nelem) df_right["right_val"] = np.arange(nelem) with pytest.raises(KeyError) as raises: df_left.merge(df_right, on="bad_key", how="left") raises.match("bad_key") def test_dataframe_merge_no_common_column(): np.random.seed(0) # Make cuDF df_left = cudf.DataFrame() nelem = 500 df_left["key1"] = np.random.randint(0, 40, nelem) df_left["key2"] = np.random.randint(0, 50, nelem) df_left["left_val"] = np.arange(nelem) df_right = cudf.DataFrame() nelem = 500 df_right["key3"] = np.random.randint(0, 30, nelem) df_right["key4"] = np.random.randint(0, 50, nelem) df_right["right_val"] = np.arange(nelem) with pytest.raises(ValueError) as raises: df_left.merge(df_right, how="left") raises.match("No common columns to perform merge on") def test_dataframe_empty_merge(): gdf1 = cudf.DataFrame({"a": [], "b": []}) gdf2 = cudf.DataFrame({"a": [], "c": []}) expect = cudf.DataFrame({"a": [], "b": [], "c": []}) got = gdf1.merge(gdf2, how="left", on=["a"]) assert_join_results_equal(expect, got, how="left") def test_dataframe_merge_order(): gdf1 = cudf.DataFrame() gdf2 = cudf.DataFrame() gdf1["id"] = [10, 11] gdf1["timestamp"] = [1, 2] gdf1["a"] = [3, 4] gdf2["id"] = [4, 5] gdf2["a"] = [7, 8] gdf = gdf1.merge(gdf2, how="left", on=["id", "a"]) df1 = pd.DataFrame() df2 = pd.DataFrame() df1["id"] = [10, 11] df1["timestamp"] = [1, 2] df1["a"] = [3, 4] df2["id"] = [4, 5] df2["a"] = [7, 8] df = df1.merge(df2, how="left", on=["id", "a"]) assert_join_results_equal(df, gdf, how="left") @pytest.mark.parametrize( "pairs", [ ("", ""), ("", "a"), ("", "ab"), ("", "abc"), ("", "b"), ("", "bcd"), ("", "cde"), ("a", "a"), ("a", "ab"), ("a", "abc"), ("a", "b"), ("a", "bcd"), ("a", "cde"), ("ab", "ab"), ("ab", "abc"), ("ab", "b"), ("ab", "bcd"), ("ab", "cde"), ("abc", "abc"), ("abc", "b"), ("abc", "bcd"), ("abc", "cde"), ("b", "b"), ("b", "bcd"), ("b", "cde"), ("bcd", "bcd"), ("bcd", "cde"), ("cde", "cde"), ], ) @pytest.mark.parametrize("max", [5, 1000]) @pytest.mark.parametrize("rows", [1, 5, 100]) @pytest.mark.parametrize("how", ["left", "inner", "outer"]) def test_dataframe_pairs_of_triples(pairs, max, rows, how): np.random.seed(0) pdf_left = pd.DataFrame() pdf_right = pd.DataFrame() for left_column in pairs[0]: pdf_left[left_column] = np.random.randint(0, max, rows) for right_column in pairs[1]: pdf_right[right_column] = np.random.randint(0, max, rows) gdf_left = cudf.from_pandas(pdf_left) gdf_right = cudf.from_pandas(pdf_right) if not set(pdf_left.columns).intersection(pdf_right.columns): with pytest.raises( pd.errors.MergeError, match="No common columns to perform merge on", ): pdf_left.merge(pdf_right) with pytest.raises( ValueError, match="No common columns to perform merge on" ): gdf_left.merge(gdf_right) elif not [value for value in pdf_left if value in pdf_right]: with pytest.raises( pd.errors.MergeError, match="No 
common columns to perform merge on", ): pdf_left.merge(pdf_right) with pytest.raises( ValueError, match="No common columns to perform merge on" ): gdf_left.merge(gdf_right) else: pdf_result = pdf_left.merge(pdf_right, how=how) gdf_result = gdf_left.merge(gdf_right, how=how) assert np.array_equal(gdf_result.columns, pdf_result.columns) for column in gdf_result: gdf_col_result_sorted = gdf_result[column].fillna(-1).sort_values() pd_col_result_sorted = pdf_result[column].fillna(-1).sort_values() assert np.array_equal( gdf_col_result_sorted.to_pandas().values, pd_col_result_sorted.values, ) def test_safe_merging_with_left_empty(): np.random.seed(0) pairs = ("bcd", "b") pdf_left = pd.DataFrame() pdf_right = pd.DataFrame() for left_column in pairs[0]: pdf_left[left_column] = np.random.randint(0, 10, 0) for right_column in pairs[1]: pdf_right[right_column] = np.random.randint(0, 10, 5) gdf_left = cudf.from_pandas(pdf_left) gdf_right = cudf.from_pandas(pdf_right) pdf_result = pdf_left.merge(pdf_right) gdf_result = gdf_left.merge(gdf_right) # Simplify test because pandas does not consider empty Index and RangeIndex # to be equivalent. TODO: Allow empty Index objects to have equivalence. assert len(pdf_result) == len(gdf_result) @pytest.mark.parametrize("how", ["left", "inner", "outer"]) @pytest.mark.parametrize("left_empty", [True, False]) @pytest.mark.parametrize("right_empty", [True, False]) def test_empty_joins(how, left_empty, right_empty): pdf = pd.DataFrame({"x": [1, 2, 3]}) if left_empty: left = pdf.head(0) else: left = pdf if right_empty: right = pdf.head(0) else: right = pdf gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) expected = left.merge(right, how=how) result = gleft.merge(gright, how=how) assert len(expected) == len(result) def test_merge_left_index_zero(): left = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6]}, index=[0, 1, 2, 3, 4, 5]) right = pd.DataFrame( {"y": [10, 20, 30, 6, 5, 4]}, index=[0, 1, 2, 3, 4, 6] ) gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) pd_merge = left.merge(right, left_on="x", right_on="y") gd_merge = gleft.merge(gright, left_on="x", right_on="y") assert_join_results_equal(pd_merge, gd_merge, how="left") @pytest.mark.parametrize( "kwargs", [ {"left_index": True, "right_on": "y"}, {"right_index": True, "left_on": "x"}, {"left_on": "x", "right_on": "y"}, {"left_index": True, "right_index": True}, ], ) def test_merge_left_right_index_left_right_on_zero_kwargs(kwargs): left = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6]}, index=[0, 1, 2, 3, 4, 5]) right = pd.DataFrame( {"y": [10, 20, 30, 6, 5, 4]}, index=[0, 1, 2, 3, 4, 6] ) gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) pd_merge = left.merge(right, **kwargs) gd_merge = gleft.merge(gright, **kwargs) assert_join_results_equal(pd_merge, gd_merge, how="left") @pytest.mark.parametrize( "kwargs", [ {"left_index": True, "right_on": "y"}, {"right_index": True, "left_on": "x"}, {"left_on": "x", "right_on": "y"}, {"left_index": True, "right_index": True}, ], ) def test_merge_left_right_index_left_right_on_kwargs(kwargs): left = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6]}, index=[1, 2, 3, 4, 5, 6]) right = pd.DataFrame( {"y": [10, 20, 30, 6, 5, 4]}, index=[1, 2, 3, 4, 5, 7] ) gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) pd_merge = left.merge(right, **kwargs) gd_merge = gleft.merge(gright, **kwargs) assert_join_results_equal(pd_merge, gd_merge, how="left") def test_indicator(): gdf = cudf.DataFrame({"x": [1, 2, 1]}) gdf.merge(gdf, indicator=False) with 
pytest.raises(NotImplementedError) as info: gdf.merge(gdf, indicator=True) assert "indicator=False" in str(info.value) def test_merge_suffixes(): pdf = cudf.DataFrame({"x": [1, 2, 1]}) gdf = cudf.DataFrame({"x": [1, 2, 1]}) assert_join_results_equal( gdf.merge(gdf, suffixes=("left", "right")), pdf.merge(pdf, suffixes=("left", "right")), how="left", ) assert_exceptions_equal( lfunc=pdf.merge, rfunc=gdf.merge, lfunc_args_and_kwargs=([pdf], {"lsuffix": "left", "rsuffix": "right"}), rfunc_args_and_kwargs=([gdf], {"lsuffix": "left", "rsuffix": "right"}), ) def test_merge_left_on_right_on(): left = pd.DataFrame({"xx": [1, 2, 3, 4, 5, 6]}) right = pd.DataFrame({"xx": [10, 20, 30, 6, 5, 4]}) gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) assert_join_results_equal( left.merge(right, on="xx"), gleft.merge(gright, on="xx"), how="left" ) assert_join_results_equal( left.merge(right, left_on="xx", right_on="xx"), gleft.merge(gright, left_on="xx", right_on="xx"), how="left", ) def test_merge_on_index_retained(): df = cudf.DataFrame() df["a"] = [1, 2, 3, 4, 5] df["b"] = ["a", "b", "c", "d", "e"] df.index = [5, 3, 4, 2, 1] df2 = cudf.DataFrame() df2["a2"] = [1, 2, 3, 4, 5] df2["res"] = ["a", "b", "c", "d", "e"] pdf = df.to_pandas() pdf2 = df2.to_pandas() gdm = df.merge(df2, left_index=True, right_index=True, how="left") pdm = pdf.merge(pdf2, left_index=True, right_index=True, how="left") gdm["a2"] = gdm["a2"].astype("float64") assert_eq(gdm.sort_index(), pdm.sort_index()) @pytest.mark.parametrize( "kwargs", [ {"left_index": True, "right_on": "y"}, {"right_index": True, "left_on": "x"}, {"left_on": "x", "right_on": "y"}, ], ) def test_merge_left_right_index_left_right_on_kwargs2(kwargs): left = pd.DataFrame({"x": [1, 2, 3]}, index=[10, 20, 30]) right = pd.DataFrame({"y": [10, 20, 30]}, index=[1, 2, 30]) gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) gd_merge = gleft.merge(gright, **kwargs) pd_merge = left.merge(right, **kwargs) if pd_merge.empty: assert gd_merge.empty @pytest.mark.parametrize( "hows", [{"how": "inner"}, {"how": "left"}, {"how": "outer"}] ) @pytest.mark.parametrize( "ons", [ {"on": "a"}, {"on": ["a", "b"]}, {"on": ["b", "a"]}, {"on": ["a", "aa", "b"]}, {"on": ["b", "a", "aa"]}, ], ) def test_merge_sort(ons, hows): kwargs = {} kwargs.update(hows) kwargs.update(ons) kwargs["sort"] = True a = [4, 6, 9, 5, 2, 4, 1, 8, 1] b = [9, 8, 7, 8, 3, 9, 7, 9, 2] aa = [8, 9, 2, 9, 3, 1, 2, 3, 4] left = pd.DataFrame({"a": a, "b": b, "aa": aa}) right = left.copy(deep=True) left.index = [6, 5, 4, 7, 5, 5, 5, 4, 4] right.index = [5, 4, 1, 9, 4, 3, 5, 4, 4] gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) gd_merge = gleft.merge(gright, **kwargs) pd_merge = left.merge(right, **kwargs) # require the join keys themselves to be sorted correctly # the non-key columns will NOT match pandas ordering assert_join_results_equal( pd_merge[kwargs["on"]], gd_merge[kwargs["on"]], how="left" ) pd_merge = pd_merge.drop(kwargs["on"], axis=1) gd_merge = gd_merge.drop(kwargs["on"], axis=1) if not pd_merge.empty: # check to make sure the non join key columns are the same pd_merge = pd_merge.sort_values(list(pd_merge.columns)).reset_index( drop=True ) gd_merge = gd_merge.sort_values(list(gd_merge.columns)).reset_index( drop=True ) assert_join_results_equal(pd_merge, gd_merge, how="left") @pytest.mark.parametrize( "kwargs", [ {"left_on": ["a"], "left_index": False, "right_index": True}, {"right_on": ["b"], "left_index": True, "right_index": False}, ], ) def 
test_merge_sort_on_indexes(kwargs): left_index = kwargs["left_index"] right_index = kwargs["right_index"] kwargs["sort"] = True a = [4, 6, 9, 5, 2, 4, 1, 8, 1] left = pd.DataFrame({"a": a}) right = pd.DataFrame({"b": a}) left.index = [6, 5, 4, 7, 5, 5, 5, 4, 4] right.index = [5, 4, 1, 9, 4, 3, 5, 4, 4] gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) gd_merge = gleft.merge(gright, **kwargs) if left_index and right_index: check_if_sorted = gd_merge[["a", "b"]].to_pandas() check_if_sorted.index.name = "index" definitely_sorted = check_if_sorted.sort_values(["index", "a", "b"]) definitely_sorted.index.name = None assert_eq(gd_merge, definitely_sorted) elif left_index: assert gd_merge["b"].is_monotonic_increasing elif right_index: assert gd_merge["a"].is_monotonic_increasing @pytest.mark.parametrize( "dtype", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_join_datetimes_index(dtype): datetimes = pd.Series(pd.date_range("20010101", "20010102", freq="12h")) pdf_lhs = pd.DataFrame(index=[1, 0, 1, 2, 0, 0, 1]) pdf_rhs = pd.DataFrame({"d": datetimes}) gdf_lhs = cudf.from_pandas(pdf_lhs) gdf_rhs = cudf.from_pandas(pdf_rhs) gdf_rhs["d"] = gdf_rhs["d"].astype(dtype) pdf = pdf_lhs.join(pdf_rhs, sort=True) gdf = gdf_lhs.join(gdf_rhs, sort=True) assert gdf["d"].dtype == cudf.dtype(dtype) assert_join_results_equal(pdf, gdf, how="inner") def test_join_with_different_names(): left = pd.DataFrame({"a": [0, 1, 2.0, 3, 4, 5, 9]}) right = pd.DataFrame({"b": [12, 5, 3, 9.0, 5], "c": [1, 2, 3, 4, 5.0]}) gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) pd_merge = left.merge(right, how="outer", left_on=["a"], right_on=["b"]) gd_merge = gleft.merge(gright, how="outer", left_on=["a"], right_on=["b"]) assert_join_results_equal(pd_merge, gd_merge, how="outer") def test_join_same_name_different_order(): left = pd.DataFrame({"a": [0, 0], "b": [1, 2]}) right = pd.DataFrame({"a": [1, 2], "b": [0, 0]}) gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) pd_merge = left.merge(right, left_on=["a", "b"], right_on=["b", "a"]) gd_merge = gleft.merge(gright, left_on=["a", "b"], right_on=["b", "a"]) assert_join_results_equal(pd_merge, gd_merge, how="left") def test_join_empty_table_dtype(): left = pd.DataFrame({"a": []}) right = pd.DataFrame({"b": [12, 5, 3, 9.0, 5], "c": [1, 2, 3, 4, 5.0]}) gleft = cudf.from_pandas(left) gright = cudf.from_pandas(right) pd_merge = left.merge(right, how="left", left_on=["a"], right_on=["b"]) gd_merge = gleft.merge(gright, how="left", left_on=["a"], right_on=["b"]) assert_eq(pd_merge["a"].dtype, gd_merge["a"].dtype) @pytest.mark.parametrize("how", ["outer", "inner", "left", "right"]) @pytest.mark.parametrize( "column_a", [ ( pd.Series([None, 1, 2, 3, 4, 5, 6, 7], dtype=np.float64), pd.Series([8, 9, 10, 11, 12, None, 14, 15], dtype=np.float64), ) ], ) @pytest.mark.parametrize( "column_b", [ ( pd.Series([0, 1, 0, None, 1, 0, 0, 0], dtype=np.float64), pd.Series([None, 1, 2, 1, 2, 2, 0, 0], dtype=np.float64), ) ], ) @pytest.mark.parametrize( "column_c", [ ( pd.Series(["dog", "cat", "fish", "bug"] * 2), pd.Series(["bird", "cat", "mouse", "snake"] * 2), ), ( pd.Series(["dog", "cat", "fish", "bug"] * 2).astype("category"), pd.Series(["bird", "cat", "mouse", "snake"] * 2).astype( "category" ), ), ], ) def test_join_multi(how, column_a, column_b, column_c): index = ["b", "c"] df1 = pd.DataFrame() df1["a1"] = column_a[0] df1["b"] = column_b[0] df1["c"] = column_c[0] df1 = df1.set_index(index) gdf1 = cudf.from_pandas(df1) df2 = 
pd.DataFrame() df2["a2"] = column_a[1] df2["b"] = column_b[1] df2["c"] = column_c[1] df2 = df2.set_index(index) gdf2 = cudf.from_pandas(df2) gdf_result = gdf1.join(gdf2, how=how, sort=True) pdf_result = df1.join(df2, how=how, sort=True) # Make sure columns are in the same order columns = pdf_result.columns.values gdf_result = gdf_result[columns] pdf_result = pdf_result[columns] assert_join_results_equal(pdf_result, gdf_result, how="inner") @pytest.mark.parametrize( "kwargs", [ { "left_on": ["a", "b"], "right_on": ["a", "b"], "left_index": False, "right_index": False, }, # left and right on, no indices { "left_on": None, "right_on": None, "left_index": True, "right_index": True, }, # left_index and right_index, no on { "left_on": ["a", "b"], "right_on": None, "left_index": False, "right_index": True, }, # left on and right_index { "left_on": None, "right_on": ["a", "b"], "left_index": True, "right_index": False, }, # right_on and left_index ], ) def test_merge_multi(kwargs): left = cudf.DataFrame( { "a": [1, 2, 3, 4, 3, 5, 6], "b": [1, 3, 5, 7, 5, 9, 0], "c": ["o", "p", "q", "r", "s", "t", "u"], "d": ["v", "w", "x", "y", "z", "1", "2"], } ) right = cudf.DataFrame( { "a": [0, 9, 3, 4, 3, 7, 8], "b": [2, 4, 5, 7, 5, 6, 8], "c": ["a", "b", "c", "d", "e", "f", "g"], "d": ["j", "i", "j", "k", "l", "m", "n"], } ) if ( kwargs["left_on"] is not None and kwargs["right_on"] is not None and kwargs["left_index"] is False and kwargs["right_index"] is False ): left = left.set_index(["c", "d"]) right = right.set_index(["c", "d"]) elif ( kwargs["left_on"] is None and kwargs["right_on"] is None and kwargs["left_index"] is True and kwargs["right_index"] is True ): left = left.set_index(["a", "b"]) right = right.set_index(["a", "b"]) elif kwargs["left_on"] is not None and kwargs["right_index"] is True: left = left.set_index(["c", "d"]) right = right.set_index(["a", "b"]) elif kwargs["right_on"] is not None and kwargs["left_index"] is True: left = left.set_index(["a", "b"]) right = right.set_index(["c", "d"]) gleft = left.to_pandas() gright = right.to_pandas() kwargs["sort"] = True expect = gleft.merge(gright, **kwargs) got = left.merge(right, **kwargs) assert_eq(expect.sort_index().index, got.sort_index().index) expect.index = range(len(expect)) got.index = range(len(got)) expect = expect.sort_values(list(expect.columns)) got = got.sort_values(list(got.columns)) expect.index = range(len(expect)) got.index = range(len(got)) assert_join_results_equal(expect, got, how="left") @pytest.mark.parametrize("dtype_l", INTEGER_TYPES) @pytest.mark.parametrize("dtype_r", INTEGER_TYPES) def test_typecast_on_join_int_to_int(dtype_l, dtype_r): other_data = ["a", "b", "c"] join_data_l = cudf.Series([1, 2, 3], dtype=dtype_l) join_data_r = cudf.Series([1, 2, 4], dtype=dtype_r) gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_dtype = np.find_common_type([], [np.dtype(dtype_l), np.dtype(dtype_r)]) exp_join_data = [1, 2] exp_other_data = ["a", "b"] exp_join_col = cudf.Series(exp_join_data, dtype=exp_dtype) expect = cudf.DataFrame( { "join_col": exp_join_col, "B_x": exp_other_data, "B_y": exp_other_data, } ) got = gdf_l.merge(gdf_r, on="join_col", how="inner") assert_join_results_equal(expect, got, how="inner") @pytest.mark.parametrize("dtype_l", ["float32", "float64"]) @pytest.mark.parametrize("dtype_r", ["float32", "float64"]) def test_typecast_on_join_float_to_float(dtype_l, dtype_r): other_data = ["a", "b", "c", "d", "e", "f"] join_data_l = 
cudf.Series([1, 2, 3, 0.9, 4.5, 6], dtype=dtype_l) join_data_r = cudf.Series([1, 2, 3, 0.9, 4.5, 7], dtype=dtype_r) gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_dtype = np.find_common_type([], [np.dtype(dtype_l), np.dtype(dtype_r)]) if dtype_l != dtype_r: exp_join_data = [1, 2, 3, 4.5] exp_other_data = ["a", "b", "c", "e"] else: exp_join_data = [1, 2, 3, 0.9, 4.5] exp_other_data = ["a", "b", "c", "d", "e"] exp_join_col = cudf.Series(exp_join_data, dtype=exp_dtype) expect = cudf.DataFrame( { "join_col": exp_join_col, "B_x": exp_other_data, "B_y": exp_other_data, } ) got = gdf_l.merge(gdf_r, on="join_col", how="inner") assert_join_results_equal(expect, got, how="inner") @pytest.mark.parametrize("dtype_l", NUMERIC_TYPES) @pytest.mark.parametrize("dtype_r", NUMERIC_TYPES) def test_typecast_on_join_mixed_int_float(dtype_l, dtype_r): if ( ("int" in dtype_l or "long" in dtype_l) and ("int" in dtype_r or "long" in dtype_r) ) or ("float" in dtype_l and "float" in dtype_r): pytest.skip("like types not tested in this function") other_data = ["a", "b", "c", "d", "e", "f"] join_data_l = cudf.Series([1, 2, 3, 0.9, 4.5, 6], dtype=dtype_l) join_data_r = cudf.Series([1, 2, 3, 0.9, 4.5, 7], dtype=dtype_r) gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_dtype = np.find_common_type([], [np.dtype(dtype_l), np.dtype(dtype_r)]) exp_join_data = [1, 2, 3] exp_other_data = ["a", "b", "c"] exp_join_col = cudf.Series(exp_join_data, dtype=exp_dtype) expect = cudf.DataFrame( { "join_col": exp_join_col, "B_x": exp_other_data, "B_y": exp_other_data, } ) got = gdf_l.merge(gdf_r, on="join_col", how="inner") assert_join_results_equal(expect, got, how="inner") def test_typecast_on_join_no_float_round(): other_data = ["a", "b", "c", "d", "e"] join_data_l = cudf.Series([1, 2, 3, 4, 5], dtype="int8") join_data_r = cudf.Series([1, 2, 3, 4.01, 4.99], dtype="float32") gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_join_data = [1, 2, 3, 4, 5] exp_Bx = ["a", "b", "c", "d", "e"] exp_By = ["a", "b", "c", None, None] exp_join_col = cudf.Series(exp_join_data, dtype="float32") expect = cudf.DataFrame( {"join_col": exp_join_col, "B_x": exp_Bx, "B_y": exp_By} ) got = gdf_l.merge(gdf_r, on="join_col", how="left") assert_join_results_equal(expect, got, how="left") @pytest.mark.parametrize( "dtypes", [ (np.dtype("int8"), np.dtype("int16")), (np.dtype("int16"), np.dtype("int32")), (np.dtype("int32"), np.dtype("int64")), (np.dtype("uint8"), np.dtype("uint16")), (np.dtype("uint16"), np.dtype("uint32")), (np.dtype("uint32"), np.dtype("uint64")), (np.dtype("float32"), np.dtype("float64")), (np.dtype("int32"), np.dtype("float32")), (np.dtype("uint32"), np.dtype("float32")), ], ) def test_typecast_on_join_overflow_unsafe(dtypes): dtype_l, dtype_r = dtypes if dtype_l.kind in {"i", "u"}: dtype_l_max = np.iinfo(dtype_l).max elif dtype_l.kind == "f": dtype_l_max = np.finfo(dtype_r).max lhs = cudf.DataFrame({"a": [1, 2, 3, 4, 5]}, dtype=dtype_l) rhs = cudf.DataFrame({"a": [1, 2, 3, 4, dtype_l_max + 1]}, dtype=dtype_r) p_lhs = lhs.to_pandas() p_rhs = rhs.to_pandas() with expect_warning_if( (dtype_l.kind == "f" and dtype_r.kind in {"i", "u"}) or (dtype_l.kind in {"i", "u"} and dtype_r.kind == "f"), UserWarning, ): expect = p_lhs.merge(p_rhs, on="a", how="left") got = lhs.merge(rhs, 
on="a", how="left") # The dtypes here won't match exactly because pandas does some unsafe # conversions (with a warning that we are catching above) that we don't # want to match. assert_join_results_equal(expect, got, how="left", check_dtype=False) @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(5, 2), Decimal64Dtype(7, 5), Decimal64Dtype(12, 7), Decimal128Dtype(20, 5), ], ) def test_decimal_typecast_inner(dtype): other_data = ["a", "b", "c", "d", "e"] join_data_l = cudf.Series(["1.6", "9.5", "7.2", "8.7", "2.3"]).astype( dtype ) join_data_r = cudf.Series(["1.6", "9.5", "7.2", "4.5", "2.3"]).astype( dtype ) gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_join_data = ["1.6", "9.5", "7.2", "2.3"] exp_other_data = ["a", "b", "c", "e"] exp_join_col = cudf.Series(exp_join_data).astype(dtype) expected = cudf.DataFrame( { "join_col": exp_join_col, "B_x": exp_other_data, "B_y": exp_other_data, } ) got = gdf_l.merge(gdf_r, on="join_col", how="inner") assert_join_results_equal(expected, got, how="inner") assert_eq(dtype, got["join_col"].dtype) @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(7, 3), Decimal64Dtype(9, 5), Decimal64Dtype(14, 10), Decimal128Dtype(21, 9), ], ) def test_decimal_typecast_left(dtype): other_data = ["a", "b", "c", "d"] join_data_l = cudf.Series(["95.05", "384.26", "74.22", "1456.94"]).astype( dtype ) join_data_r = cudf.Series( ["95.05", "62.4056", "74.22", "1456.9472"] ).astype(dtype) gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_join_data = ["95.05", "74.22", "384.26", "1456.94"] exp_other_data_x = ["a", "c", "b", "d"] exp_other_data_y = ["a", "c", None, None] exp_join_col = cudf.Series(exp_join_data).astype(dtype) expected = cudf.DataFrame( { "join_col": exp_join_col, "B_x": exp_other_data_x, "B_y": exp_other_data_y, } ) got = gdf_l.merge(gdf_r, on="join_col", how="left") assert_join_results_equal(expected, got, how="left") assert_eq(dtype, got["join_col"].dtype) @pytest.mark.parametrize( "dtype", [ Decimal64Dtype(7, 3), Decimal64Dtype(10, 5), Decimal64Dtype(18, 9), Decimal128Dtype(22, 8), ], ) def test_decimal_typecast_outer(dtype): other_data = ["a", "b", "c"] join_data_l = cudf.Series(["741.248", "1029.528", "3627.292"]).astype( dtype ) join_data_r = cudf.Series(["9284.103", "1029.528", "948.637"]).astype( dtype ) gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_join_data = ["9284.103", "948.637", "1029.528", "741.248", "3627.292"] exp_other_data_x = [None, None, "b", "a", "c"] exp_other_data_y = ["a", "c", "b", None, None] exp_join_col = cudf.Series(exp_join_data).astype(dtype) expected = cudf.DataFrame( { "join_col": exp_join_col, "B_x": exp_other_data_x, "B_y": exp_other_data_y, } ) got = gdf_l.merge(gdf_r, on="join_col", how="outer") assert_join_results_equal(expected, got, how="outer") assert_eq(dtype, got["join_col"].dtype) @pytest.mark.parametrize( "dtype_l", [Decimal64Dtype(7, 3), Decimal64Dtype(9, 5)], ) @pytest.mark.parametrize( "dtype_r", [Decimal64Dtype(8, 3), Decimal64Dtype(11, 6)], ) def test_mixed_decimal_typecast(dtype_l, dtype_r): other_data = ["a", "b", "c", "d"] join_data_l = cudf.Series(["95.05", "34.6", "74.22", "14.94"]).astype( dtype_r ) join_data_r = cudf.Series(["95.05", "62.4056", "74.22", "1.42"]).astype( dtype_l ) gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": 
other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) with pytest.raises( TypeError, match="Decimal columns can only be merged with decimal columns " "of the same precision and scale", ): gdf_l.merge(gdf_r, on="join_col", how="inner") @pytest.mark.parametrize( "dtype_l", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) @pytest.mark.parametrize( "dtype_r", ["datetime64[s]", "datetime64[ms]", "datetime64[us]", "datetime64[ns]"], ) def test_typecast_on_join_dt_to_dt(dtype_l, dtype_r): other_data = ["a", "b", "c", "d", "e"] join_data_l = cudf.Series( ["1991-11-20", "1999-12-31", "2004-12-04", "2015-01-01", "2019-08-15"] ).astype(dtype_l) join_data_r = cudf.Series( ["1991-11-20", "1999-12-31", "2004-12-04", "2015-01-01", "2019-08-16"] ).astype(dtype_r) gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_dtype = max(np.dtype(dtype_l), np.dtype(dtype_r)) exp_join_data = ["1991-11-20", "1999-12-31", "2004-12-04", "2015-01-01"] exp_other_data = ["a", "b", "c", "d"] exp_join_col = cudf.Series(exp_join_data, dtype=exp_dtype) expect = cudf.DataFrame( { "join_col": exp_join_col, "B_x": exp_other_data, "B_y": exp_other_data, } ) got = gdf_l.merge(gdf_r, on="join_col", how="inner") assert_join_results_equal(expect, got, how="inner") @pytest.mark.parametrize("dtype_l", ["category", "str", "int32", "float32"]) @pytest.mark.parametrize("dtype_r", ["category", "str", "int32", "float32"]) def test_typecast_on_join_categorical(dtype_l, dtype_r): if not (dtype_l == "category" or dtype_r == "category"): pytest.skip("at least one side must be category for this set of tests") if dtype_l == "category" and dtype_r == "category": pytest.skip("Can't determine which categorical to use") other_data = ["a", "b", "c", "d", "e"] join_data_l = cudf.Series([1, 2, 3, 4, 5], dtype=dtype_l) join_data_r = cudf.Series([1, 2, 3, 4, 6], dtype=dtype_r) if dtype_l == "category": exp_dtype = join_data_l.dtype.categories.dtype elif dtype_r == "category": exp_dtype = join_data_r.dtype.categories.dtype gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data}) gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data}) exp_join_data = [1, 2, 3, 4] exp_other_data = ["a", "b", "c", "d"] exp_join_col = cudf.Series(exp_join_data, dtype=exp_dtype) expect = cudf.DataFrame( { "join_col": exp_join_col, "B_x": exp_other_data, "B_y": exp_other_data, } ) got = gdf_l.merge(gdf_r, on="join_col", how="inner") assert_join_results_equal(expect, got, how="inner") def make_categorical_dataframe(categories, ordered=False): dtype = CategoricalDtype(categories=categories, ordered=ordered) data = cudf.Series(categories).astype(dtype) return cudf.DataFrame({"key": data}) def test_categorical_typecast_inner(): # Inner join casting rules for categoricals # Equal categories, equal ordering -> common categorical left = make_categorical_dataframe([1, 2, 3], ordered=False) right = make_categorical_dataframe([1, 2, 3], ordered=False) result = left.merge(right, how="inner", on="key") expect_dtype = CategoricalDtype(categories=[1, 2, 3], ordered=False) expect_data = cudf.Series([1, 2, 3], dtype=expect_dtype, name="key") assert_join_results_equal( expect_data, result["key"], how="inner", check_categorical=False ) # Equal categories, unequal ordering -> error left = make_categorical_dataframe([1, 2, 3], ordered=False) right = make_categorical_dataframe([1, 2, 3], ordered=True) with pytest.raises(TypeError): result = 
left.merge(right, how="inner", on="key")

    # Unequal categories
    # Neither ordered -> unordered categorical with intersection
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([2, 3, 4], ordered=False)
    result = left.merge(right, how="inner", on="key")

    expect_dtype = cudf.CategoricalDtype(categories=[2, 3], ordered=False)
    expect_data = cudf.Series([2, 3], dtype=expect_dtype, name="key")

    assert_join_results_equal(
        expect_data, result["key"], how="inner", check_categorical=False
    )

    # One is ordered -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([2, 3, 4], ordered=True)
    with pytest.raises(TypeError):
        result = left.merge(right, how="inner", on="key")

    # Both are ordered -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=True)
    right = make_categorical_dataframe([2, 3, 4], ordered=True)
    with pytest.raises(TypeError):
        result = left.merge(right, how="inner", on="key")


def test_categorical_typecast_left():
    # TODO: generalize to right or write another test
    # Left join casting rules for categoricals

    # equal categories, neither ordered -> common dtype
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([1, 2, 3], ordered=False)
    result = left.merge(right, on="key", how="left")

    expect_dtype = CategoricalDtype(categories=[1, 2, 3], ordered=False)
    expect_data = cudf.Series([1, 2, 3], dtype=expect_dtype, name="key")

    assert_join_results_equal(expect_data, result["key"], how="left")

    # equal categories, unequal ordering -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=True)
    right = make_categorical_dataframe([1, 2, 3], ordered=False)

    with pytest.raises(TypeError):
        result = left.merge(right, on="key", how="left")
    with pytest.raises(TypeError):
        result = right.merge(left, on="key", how="left")

    # unequal categories neither ordered -> left dtype
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([2, 3, 4], ordered=False)
    result = left.merge(right, on="key", how="left")

    expect_dtype = CategoricalDtype(categories=[1, 2, 3], ordered=False)
    expect_data = cudf.Series([1, 2, 3], dtype=expect_dtype, name="key")

    assert_join_results_equal(expect_data, result["key"], how="left")

    # unequal categories, unequal ordering -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=True)
    right = make_categorical_dataframe([2, 3, 4], ordered=False)
    with pytest.raises(TypeError):
        result = left.merge(right, on="key", how="left")

    # unequal categories, right ordered -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([2, 3, 4], ordered=True)
    with pytest.raises(TypeError):
        result = left.merge(right, on="key", how="left")

    # unequal categories, both ordered -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=True)
    right = make_categorical_dataframe([2, 3, 4], ordered=True)
    with pytest.raises(TypeError):
        result = left.merge(right, on="key", how="left")


def test_categorical_typecast_outer():
    # Outer join casting rules for categoricals

    # equal categories, neither ordered -> common dtype
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([1, 2, 3], ordered=False)
    result = left.merge(right, on="key", how="outer")

    expect_dtype = CategoricalDtype(categories=[1, 2, 3], ordered=False)
    expect_data = cudf.Series([1, 2, 3], dtype=expect_dtype, name="key")

    assert_join_results_equal(expect_data, result["key"], how="outer")

    # equal categories, both ordered -> common dtype
    left = make_categorical_dataframe([1, 2, 3], ordered=True)
    right = make_categorical_dataframe([1, 2, 3], ordered=True)
    result = left.merge(right, on="key", how="outer")

    expect_dtype = CategoricalDtype(categories=[1, 2, 3], ordered=True)
    expect_data = cudf.Series([1, 2, 3], dtype=expect_dtype, name="key")

    assert_join_results_equal(expect_data, result["key"], how="outer")

    # equal categories, one ordered -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([1, 2, 3], ordered=True)

    with pytest.raises(TypeError):
        result = left.merge(right, how="outer", on="key")
    with pytest.raises(TypeError):
        result = right.merge(left, how="outer", on="key")

    # unequal categories, neither ordered -> superset
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([2, 3, 4], ordered=False)
    result = left.merge(right, on="key", how="outer")

    expect_dtype = CategoricalDtype(categories=[1, 2, 3, 4], ordered=False)
    expect_data = cudf.Series([1, 2, 3, 4], dtype=expect_dtype, name="key")

    assert_join_results_equal(expect_data, result["key"], how="outer")

    # unequal categories, one ordered -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=False)
    right = make_categorical_dataframe([2, 3, 4], ordered=True)

    with pytest.raises(TypeError):
        result = left.merge(right, how="outer", on="key")
    with pytest.raises(TypeError):
        result = right.merge(left, how="outer", on="key")

    # unequal categories, both ordered -> error
    left = make_categorical_dataframe([1, 2, 3], ordered=True)
    right = make_categorical_dataframe([2, 3, 4], ordered=True)
    with pytest.raises(TypeError):
        result = left.merge(right, how="outer", on="key")


@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["object"])
def test_categorical_typecast_inner_one_cat(dtype):
    data = np.array([1, 2, 3], dtype=dtype)

    left = make_categorical_dataframe(data)
    right = left.astype(left["key"].dtype.categories.dtype)

    result = left.merge(right, on="key", how="inner")
    assert result["key"].dtype == left["key"].dtype.categories.dtype


@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["object"])
def test_categorical_typecast_left_one_cat(dtype):
    data = np.array([1, 2, 3], dtype=dtype)

    left = make_categorical_dataframe(data)
    right = left.astype(left["key"].dtype.categories.dtype)

    result = left.merge(right, on="key", how="left")
    assert result["key"].dtype == left["key"].dtype


@pytest.mark.parametrize("dtype", NUMERIC_TYPES + ["object"])
def test_categorical_typecast_outer_one_cat(dtype):
    data = np.array([1, 2, 3], dtype=dtype)

    left = make_categorical_dataframe(data)
    right = left.astype(left["key"].dtype.categories.dtype)

    result = left.merge(right, on="key", how="outer")
    assert result["key"].dtype == left["key"].dtype.categories.dtype


@pytest.mark.parametrize(
    ("lhs", "rhs"),
    [
        (["a", "b"], ["a"]),
        (["a"], ["a", "b"]),
        (["a", "b"], ["b"]),
        (["b"], ["a", "b"]),
        (["a"], ["a"]),
    ],
)
@pytest.mark.parametrize("how", ["left", "right", "outer", "inner"])
@pytest.mark.parametrize("level", ["a", "b", 0, 1])
def test_index_join(lhs, rhs, how, level):
    l_pdf = pd.DataFrame({"a": [2, 3, 1, 4], "b": [3, 7, 8, 1]})
    r_pdf = pd.DataFrame({"a": [1, 5, 4, 0], "b": [3, 9, 8, 4]})
    l_df = cudf.from_pandas(l_pdf)
    r_df = cudf.from_pandas(r_pdf)
    p_lhs = l_pdf.set_index(lhs).index
    p_rhs = r_pdf.set_index(rhs).index
    g_lhs = l_df.set_index(lhs).index
    g_rhs = r_df.set_index(rhs).index

    expected = p_lhs.join(p_rhs, level=level, how=how).to_frame(index=False)
    got = g_lhs.join(g_rhs, level=level, how=how).to_frame(index=False)

    assert_join_results_equal(expected, got, how=how)


def test_index_join_corner_cases():
    l_pdf = pd.DataFrame({"a": [2, 3, 1, 4], "b": [3, 7, 8, 1]})
    r_pdf = pd.DataFrame(
        {"a": [1, 5, 4, 0], "b": [3, 9, 8, 4], "c": [2, 3, 6, 0]}
    )
    l_df = cudf.from_pandas(l_pdf)
    r_df = cudf.from_pandas(r_pdf)

    # Join when column name doesn't match with level
    lhs = ["a", "b"]
    # level and rhs don't match
    rhs = ["c"]
    level = "b"
    how = "outer"
    p_lhs = l_pdf.set_index(lhs).index
    p_rhs = r_pdf.set_index(rhs).index
    g_lhs = l_df.set_index(lhs).index
    g_rhs = r_df.set_index(rhs).index
    expected = p_lhs.join(p_rhs, level=level, how=how).to_frame(index=False)
    got = g_lhs.join(g_rhs, level=level, how=how).to_frame(index=False)

    assert_join_results_equal(expected, got, how=how)

    # sort is supported only in case of two non-MultiIndex join
    # Join when column name doesn't match with level
    lhs = ["a"]
    # level and rhs don't match
    rhs = ["a"]
    level = "b"
    how = "left"
    p_lhs = l_pdf.set_index(lhs).index
    p_rhs = r_pdf.set_index(rhs).index
    g_lhs = l_df.set_index(lhs).index
    g_rhs = r_df.set_index(rhs).index
    expected = p_lhs.join(p_rhs, how=how, sort=True)
    got = g_lhs.join(g_rhs, how=how, sort=True)

    assert_join_results_equal(expected, got, how=how)

    # Pandas Index.join on categorical column returns generic column
    # but cudf will be returning a categorical column itself.
    lhs = ["a", "b"]
    rhs = ["a"]
    level = "a"
    how = "inner"
    l_df["a"] = l_df["a"].astype("category")
    r_df["a"] = r_df["a"].astype("category")
    p_lhs = l_pdf.set_index(lhs).index
    p_rhs = r_pdf.set_index(rhs).index
    g_lhs = l_df.set_index(lhs).index
    g_rhs = r_df.set_index(rhs).index
    expected = p_lhs.join(p_rhs, level=level, how=how).to_frame(index=False)
    got = g_lhs.join(g_rhs, level=level, how=how).to_frame(index=False)

    got["a"] = got["a"].astype(expected["a"].dtype)

    assert_join_results_equal(expected, got, how=how)


def test_index_join_exception_cases():
    l_df = cudf.DataFrame({"a": [2, 3, 1, 4], "b": [3, 7, 8, 1]})
    r_df = cudf.DataFrame(
        {"a": [1, 5, 4, 0], "b": [3, 9, 8, 4], "c": [2, 3, 6, 0]}
    )

    # Join between two MultiIndex
    lhs = ["a", "b"]
    rhs = ["a", "c"]
    level = "a"
    how = "outer"
    g_lhs = l_df.set_index(lhs).index
    g_rhs = r_df.set_index(rhs).index

    with pytest.raises(TypeError):
        g_lhs.join(g_rhs, level=level, how=how)

    # Improper level value, level should be an int or scalar value
    level = ["a"]
    rhs = ["a"]
    g_lhs = l_df.set_index(lhs).index
    g_rhs = r_df.set_index(rhs).index
    with pytest.raises(ValueError):
        g_lhs.join(g_rhs, level=level, how=how)


def test_typecast_on_join_indexes():
    join_data_l = cudf.Series([1, 2, 3, 4, 5], dtype="int8")
    join_data_r = cudf.Series([1, 2, 3, 4, 6], dtype="int32")
    other_data = ["a", "b", "c", "d", "e"]

    gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data})
    gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data})

    gdf_l = gdf_l.set_index("join_col")
    gdf_r = gdf_r.set_index("join_col")

    exp_join_data = [1, 2, 3, 4]
    exp_other_data = ["a", "b", "c", "d"]

    expect = cudf.DataFrame(
        {
            "join_col": exp_join_data,
            "B_x": exp_other_data,
            "B_y": exp_other_data,
        }
    )
    expect = expect.set_index("join_col")

    got = gdf_l.join(gdf_r, how="inner", lsuffix="_x", rsuffix="_y")

    assert_join_results_equal(expect, got, how="inner")


def test_typecast_on_join_multiindices():
    join_data_l_0 = cudf.Series([1, 2, 3, 4, 5], dtype="int8")
    join_data_l_1 = cudf.Series([2, 3, 4.1, 5.9, 6], dtype="float32")
    join_data_l_2 = cudf.Series([7, 8, 9, 0, 1], dtype="float32")

    join_data_r_0 = cudf.Series([1, 2, 3, 4, 5], dtype="int32")
    join_data_r_1 = cudf.Series([2, 3, 4, 5, 6], dtype="int32")
    join_data_r_2 = cudf.Series([7, 8, 9, 0, 0], dtype="float64")

    other_data = ["a", "b", "c", "d", "e"]

    gdf_l = cudf.DataFrame(
        {
            "join_col_0": join_data_l_0,
            "join_col_1": join_data_l_1,
            "join_col_2": join_data_l_2,
            "B": other_data,
        }
    )
    gdf_r = cudf.DataFrame(
        {
            "join_col_0": join_data_r_0,
            "join_col_1": join_data_r_1,
            "join_col_2": join_data_r_2,
            "B": other_data,
        }
    )

    gdf_l = gdf_l.set_index(["join_col_0", "join_col_1", "join_col_2"])
    gdf_r = gdf_r.set_index(["join_col_0", "join_col_1", "join_col_2"])

    exp_join_data_0 = cudf.Series([1, 2], dtype="int32")
    exp_join_data_1 = cudf.Series([2, 3], dtype="float64")
    exp_join_data_2 = cudf.Series([7, 8], dtype="float64")
    exp_other_data = cudf.Series(["a", "b"])

    expect = cudf.DataFrame(
        {
            "join_col_0": exp_join_data_0,
            "join_col_1": exp_join_data_1,
            "join_col_2": exp_join_data_2,
            "B_x": exp_other_data,
            "B_y": exp_other_data,
        }
    )
    expect = expect.set_index(["join_col_0", "join_col_1", "join_col_2"])
    got = gdf_l.join(gdf_r, how="inner", lsuffix="_x", rsuffix="_y")

    assert_join_results_equal(expect, got, how="inner")


def test_typecast_on_join_indexes_matching_categorical():
    join_data_l = cudf.Series(["a", "b", "c", "d", "e"], dtype="category")
    join_data_r = cudf.Series(["a", "b", "c", "d", "e"], dtype="str")
    other_data = [1, 2, 3, 4, 5]

    gdf_l = cudf.DataFrame({"join_col": join_data_l, "B": other_data})
    gdf_r = cudf.DataFrame({"join_col": join_data_r, "B": other_data})

    gdf_l = gdf_l.set_index("join_col")
    gdf_r = gdf_r.set_index("join_col")

    exp_join_data = ["a", "b", "c", "d", "e"]
    exp_other_data = [1, 2, 3, 4, 5]

    expect = cudf.DataFrame(
        {
            "join_col": exp_join_data,
            "B_x": exp_other_data,
            "B_y": exp_other_data,
        }
    )
    expect = expect.set_index("join_col")
    got = gdf_l.join(gdf_r, how="inner", lsuffix="_x", rsuffix="_y")

    assert_join_results_equal(expect, got, how="inner")
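# A minimal illustrative sketch (hypothetical helper, not part of the
# suite): the typecast-on-join tests above assume that mismatched join-key
# dtypes are promoted to a common type before matching, following NumPy's
# promotion rules, so e.g. int8 keys can match int32 keys on equal values.
def _sketch_join_key_promotion():
    import numpy as np

    # int8 and int32 promote to int32, the type in which the keys compare
    assert np.promote_types("int8", "int32") == np.dtype("int32")
    # int8 and float32 promote to a float type wide enough for both
    assert np.promote_types("int8", "float32") == np.dtype("float32")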
on="a") assert_join_results_equal(expect, got, how=how) def test_join_renamed_index(): df = cudf.DataFrame( {0: [1, 2, 3, 4, 5], 1: [1, 2, 3, 4, 5], "c": [1, 2, 3, 4, 5]} ).set_index([0, 1]) df.index.names = ["a", "b"] # doesn't actually change df._index._data expect = df.to_pandas().merge( df.to_pandas(), left_index=True, right_index=True ) got = df.merge(df, left_index=True, right_index=True, how="inner") assert_join_results_equal(expect, got, how="inner") @pytest.mark.parametrize( "lhs_col, lhs_idx, rhs_col, rhs_idx, on", [ (["A", "B"], "L0", ["B", "C"], "L0", ["B"]), (["A", "B"], "L0", ["B", "C"], "L0", ["L0"]), (["A", "B"], "L0", ["B", "C"], "L0", ["B", "L0"]), (["A", "B"], "L0", ["C", "L0"], "A", ["A"]), (["A", "B"], "L0", ["C", "L0"], "A", ["L0"]), (["A", "B"], "L0", ["C", "L0"], "A", ["A", "L0"]), ], ) @pytest.mark.parametrize( "how", ["left", "inner", "right", "outer", "leftanti", "leftsemi"] ) def test_join_merge_with_on(lhs_col, lhs_idx, rhs_col, rhs_idx, on, how): lhs_data = {col_name: [4, 5, 6] for col_name in lhs_col} lhs_index = cudf.Index([0, 1, 2], name=lhs_idx) rhs_data = {col_name: [4, 5, 6] for col_name in rhs_col} rhs_index = cudf.Index([2, 3, 4], name=rhs_idx) gd_left = cudf.DataFrame(lhs_data, lhs_index) gd_right = cudf.DataFrame(rhs_data, rhs_index) pd_left = gd_left.to_pandas() pd_right = gd_right.to_pandas() expect = pd_left.merge(pd_right, on=on).sort_index(axis=1, ascending=False) got = gd_left.merge(gd_right, on=on).sort_index(axis=1, ascending=False) assert_join_results_equal(expect, got, how=how) @pytest.mark.parametrize( "on", ["A", "L0"], ) @pytest.mark.parametrize( "how", ["left", "inner", "right", "outer", "leftanti", "leftsemi"] ) def test_join_merge_invalid_keys(on, how): gd_left = cudf.DataFrame( {"A": [1, 2, 3], "B": [4, 5, 6]}, index=cudf.Index([0, 1, 2], name="C") ) gd_right = cudf.DataFrame( {"D": [2, 3, 4], "E": [7, 8, 0]}, index=cudf.Index([0, 2, 4], name="F") ) pd_left = gd_left.to_pandas() pd_right = gd_right.to_pandas() with pytest.raises(KeyError): pd_left.merge(pd_right, on=on) gd_left.merge(gd_right, on=on) @pytest.mark.parametrize( "str_data", [[], ["a", "b", "c", "d", "e"], [None, None, None, None, None]], ) @pytest.mark.parametrize("num_keys", [1, 2, 3]) @pytest.mark.parametrize("how", ["left", "right", "inner", "outer"]) def test_string_join_key(str_data, num_keys, how): other_data = [1, 2, 3, 4, 5][: len(str_data)] pdf = pd.DataFrame() gdf = cudf.DataFrame() for i in range(num_keys): pdf[i] = pd.Series(str_data, dtype="str") gdf[i] = cudf.Series(str_data, dtype="str") pdf["a"] = other_data gdf["a"] = other_data pdf2 = pdf.copy() gdf2 = gdf.copy() expect = pdf.merge(pdf2, on=list(range(num_keys)), how=how) got = gdf.merge(gdf2, on=list(range(num_keys)), how=how) if len(expect) == 0 and len(got) == 0: expect = expect.reset_index(drop=True) got = got[expect.columns] # reorder columns if how == "right": got = got[expect.columns] # reorder columns assert_join_results_equal(expect, got, how=how) @pytest.mark.parametrize( "str_data_nulls", [ ["a", "b", "c"], ["a", "b", "f", "g"], ["f", "g", "h", "i", "j"], ["f", "g", "h"], [None, None, None, None, None], [], ], ) def test_string_join_key_nulls(str_data_nulls): str_data = ["a", "b", "c", "d", "e"] other_data = [1, 2, 3, 4, 5] other_data_nulls = [6, 7, 8, 9, 10][: len(str_data_nulls)] pdf = pd.DataFrame() gdf = cudf.DataFrame() pdf["key"] = pd.Series(str_data, dtype="str") gdf["key"] = cudf.Series(str_data, dtype="str") pdf["vals"] = other_data gdf["vals"] = other_data pdf2 = pd.DataFrame() 
gdf2 = cudf.DataFrame() pdf2["key"] = pd.Series(str_data_nulls, dtype="str") gdf2["key"] = cudf.Series(str_data_nulls, dtype="str") pdf2["vals"] = pd.Series(other_data_nulls, dtype="int64") gdf2["vals"] = cudf.Series(other_data_nulls, dtype="int64") expect = pdf.merge(pdf2, on="key", how="left") got = gdf.merge(gdf2, on="key", how="left") got["vals_y"] = got["vals_y"].fillna(-1) if len(expect) == 0 and len(got) == 0: expect = expect.reset_index(drop=True) got = got[expect.columns] expect["vals_y"] = expect["vals_y"].fillna(-1).astype("int64") assert_join_results_equal(expect, got, how="left") @pytest.mark.parametrize( "str_data", [[], ["a", "b", "c", "d", "e"], [None, None, None, None, None]] ) @pytest.mark.parametrize("num_cols", [1, 2, 3]) @pytest.mark.parametrize("how", ["left", "right", "inner", "outer"]) def test_string_join_non_key(str_data, num_cols, how): other_data = [1, 2, 3, 4, 5][: len(str_data)] pdf = pd.DataFrame() gdf = cudf.DataFrame() for i in range(num_cols): pdf[i] = pd.Series(str_data, dtype="str") gdf[i] = cudf.Series(str_data, dtype="str") pdf["a"] = other_data gdf["a"] = other_data pdf2 = pdf.copy() gdf2 = gdf.copy() expect = pdf.merge(pdf2, on=["a"], how=how) got = gdf.merge(gdf2, on=["a"], how=how) if len(expect) == 0 and len(got) == 0: expect = expect.reset_index(drop=True) got = got[expect.columns] if how == "right": got = got[expect.columns] # reorder columns assert_join_results_equal(expect, got, how=how) @pytest.mark.parametrize( "str_data_nulls", [ ["a", "b", "c"], ["a", "b", "f", "g"], ["f", "g", "h", "i", "j"], ["f", "g", "h"], [None, None, None, None, None], [], ], ) def test_string_join_non_key_nulls(str_data_nulls): str_data = ["a", "b", "c", "d", "e"] other_data = [1, 2, 3, 4, 5] other_data_nulls = [6, 7, 8, 9, 10][: len(str_data_nulls)] pdf = pd.DataFrame() gdf = cudf.DataFrame() pdf["vals"] = pd.Series(str_data, dtype="str") gdf["vals"] = cudf.Series(str_data, dtype="str") pdf["key"] = other_data gdf["key"] = other_data pdf2 = pd.DataFrame() gdf2 = cudf.DataFrame() pdf2["vals"] = pd.Series(str_data_nulls, dtype="str") gdf2["vals"] = cudf.Series(str_data_nulls, dtype="str") pdf2["key"] = pd.Series(other_data_nulls, dtype="int64") gdf2["key"] = cudf.Series(other_data_nulls, dtype="int64") expect = pdf.merge(pdf2, on="key", how="left") got = gdf.merge(gdf2, on="key", how="left") if len(expect) == 0 and len(got) == 0: expect = expect.reset_index(drop=True) got = got[expect.columns] assert_join_results_equal(expect, got, how="left") def test_string_join_values_nulls(): left_dict = [ {"b": "MATCH 1", "a": 1.0}, {"b": "MATCH 1", "a": 1.0}, {"b": "LEFT NO MATCH 1", "a": -1.0}, {"b": "MATCH 2", "a": 2.0}, {"b": "MATCH 2", "a": 2.0}, {"b": "MATCH 1", "a": 1.0}, {"b": "MATCH 1", "a": 1.0}, {"b": "MATCH 2", "a": 2.0}, {"b": "MATCH 2", "a": 2.0}, {"b": "LEFT NO MATCH 2", "a": -2.0}, {"b": "MATCH 3", "a": 3.0}, {"b": "MATCH 3", "a": 3.0}, ] right_dict = [ {"b": "RIGHT NO MATCH 1", "c": -1.0}, {"b": "MATCH 3", "c": 3.0}, {"b": "MATCH 2", "c": 2.0}, {"b": "RIGHT NO MATCH 2", "c": -2.0}, {"b": "RIGHT NO MATCH 3", "c": -3.0}, {"b": "MATCH 1", "c": 1.0}, ] left_pdf = pd.DataFrame(left_dict) right_pdf = pd.DataFrame(right_dict) left_gdf = cudf.DataFrame.from_pandas(left_pdf) right_gdf = cudf.DataFrame.from_pandas(right_pdf) expect = left_pdf.merge(right_pdf, how="left", on="b") got = left_gdf.merge(right_gdf, how="left", on="b") expect = expect.sort_values(by=["a", "b", "c"]).reset_index(drop=True) got = got.sort_values(by=["a", "b", "c"]).reset_index(drop=True) 
assert_join_results_equal(expect, got, how="left") @pytest.mark.parametrize( "left_on,right_on", [ *product(["a", "b", "c"], ["a", "b"]), *zip(combinations(["a", "b", "c"], 2), repeat(["a", "b"])), ], ) def test_merge_mixed_index_columns(left_on, right_on): left = pd.DataFrame({"a": [1, 2, 1, 2], "b": [2, 3, 3, 4]}).set_index("a") right = pd.DataFrame({"a": [1, 2, 1, 3], "b": [2, 30, 3, 4]}).set_index( "a" ) left["c"] = 10 expect = left.merge(right, left_on=left_on, right_on=right_on, how="outer") cleft = cudf.from_pandas(left) cright = cudf.from_pandas(right) got = cleft.merge(cright, left_on=left_on, right_on=right_on, how="outer") assert_join_results_equal(expect, got, how="outer") def test_merge_multiindex_columns(): lhs = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}) lhs.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")]) rhs = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}) rhs.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "z")]) expect = lhs.merge(rhs, on=[("a", "x")], how="inner") lhs = cudf.from_pandas(lhs) rhs = cudf.from_pandas(rhs) got = lhs.merge(rhs, on=[("a", "x")], how="inner") assert_join_results_equal(expect, got, how="inner") def test_join_multiindex_empty(): lhs = pd.DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}, index=["a", "b", "c"]) lhs.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")]) rhs = pd.DataFrame(index=["a", "c", "d"]) with pytest.warns(FutureWarning): expect = lhs.join(rhs, how="inner") lhs = cudf.from_pandas(lhs) rhs = cudf.from_pandas(rhs) with pytest.warns(FutureWarning): got = lhs.join(rhs, how="inner") assert_join_results_equal(expect, got, how="inner") def test_join_on_index_with_duplicate_names(): # although index levels with duplicate names are poorly supported # overall, we *should* be able to join on them: lhs = pd.DataFrame({"a": [1, 2, 3]}) rhs = pd.DataFrame({"b": [1, 2, 3]}) lhs.index = pd.MultiIndex.from_tuples( [(1, 1), (1, 2), (2, 1)], names=["x", "x"] ) rhs.index = pd.MultiIndex.from_tuples( [(1, 1), (1, 3), (2, 1)], names=["x", "x"] ) expect = lhs.join(rhs, how="inner") lhs = cudf.from_pandas(lhs) rhs = cudf.from_pandas(rhs) got = lhs.join(rhs, how="inner") assert_join_results_equal(expect, got, how="inner") def test_join_redundant_params(): lhs = cudf.DataFrame( {"a": [1, 2, 3], "c": [2, 3, 4]}, index=cudf.Index([0, 1, 2], name="c") ) rhs = cudf.DataFrame( {"b": [1, 2, 3]}, index=cudf.Index([0, 1, 2], name="a") ) with pytest.raises(ValueError): lhs.merge(rhs, on="a", left_index=True) with pytest.raises(ValueError): lhs.merge(rhs, left_on="a", left_index=True, right_index=True) with pytest.raises(ValueError): lhs.merge(rhs, right_on="a", left_index=True, right_index=True) with pytest.raises(ValueError): lhs.merge(rhs, left_on="c", right_on="b") def test_join_multiindex_index(): # test joining a MultiIndex with an Index with overlapping name lhs = ( cudf.DataFrame({"a": [2, 3, 1], "b": [3, 4, 2]}) .set_index(["a", "b"]) .index ) rhs = cudf.DataFrame({"a": [1, 4, 3]}).set_index("a").index expect = lhs.to_pandas().join(rhs.to_pandas(), how="inner") got = lhs.join(rhs, how="inner") assert_join_results_equal(expect, got, how="inner") def test_dataframe_join_on(): """Verify that specifying the on parameter gives a NotImplementedError.""" df = cudf.DataFrame({"a": [1, 2, 3]}) with pytest.raises(NotImplementedError): df.join(df, on="a") def test_index_join_return_indexers_notimplemented(): index = cudf.RangeIndex(start=0, stop=20, step=2) other = cudf.Index([4, 4, 3, 3]) with pytest.raises(NotImplementedError): 
index.join(other, how="left", return_indexers=True) @pytest.mark.parametrize("how", ["inner", "outer"]) def test_index_join_names(how): idx1 = cudf.Index([10, 1, 2, 4, 2, 1], name="a") idx2 = cudf.Index([-10, 2, 3, 1, 2], name="b") expected = idx1.to_pandas().join(idx2.to_pandas(), how=how) actual = idx1.join(idx2, how=how) assert_join_results_equal(actual, expected, how=how) @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) def test_join_datetime_timedelta_error(dtype): df1 = cudf.DataFrame({"a": cudf.Series([10, 20, 30], dtype=dtype)}) df2 = df1.astype("int") with pytest.raises(TypeError): df1.merge(df2) @pytest.mark.parametrize("dtype1", TIMEDELTA_TYPES) @pytest.mark.parametrize("dtype2", TIMEDELTA_TYPES) def test_merge_timedelta_types(dtype1, dtype2): df1 = cudf.DataFrame({"a": cudf.Series([10, 20, 30], dtype=dtype1)}) df2 = cudf.DataFrame({"a": cudf.Series([20, 500, 33240], dtype=dtype2)}) pdf1 = df1.to_pandas() pdf2 = df2.to_pandas() actual = df1.merge(df2) expected = pdf1.merge(pdf2) # Pandas is materializing the index, which is unnecessary # hence the special handling. assert_eq( actual, expected, check_index_type=False if isinstance(actual.index, cudf.RangeIndex) and isinstance(expected.index, pd.Index) else True, check_dtype=True, )
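# A minimal illustrative sketch (hypothetical helper, not part of the
# suite), summarizing the categorical casting rules the typecast tests
# above encode: inner joins keep the intersection of the categories, left
# joins keep the left dtype, outer joins take the union, and any
# ordered/unordered mismatch raises TypeError.
def _sketch_categorical_join_casting():
    import cudf

    left = cudf.DataFrame({"key": cudf.Series([1, 2, 3], dtype="category")})
    right = cudf.DataFrame({"key": cudf.Series([2, 3, 4], dtype="category")})

    inner = left.merge(right, on="key", how="inner")
    # the inner result's categories are the intersection {2, 3}
    assert set(inner["key"].dtype.categories.to_pandas()) == {2, 3}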
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_string_udfs.py
# Copyright (c) 2022-2023, NVIDIA CORPORATION.

import numba
import numpy as np
import pandas as pd
import pytest
from numba import cuda
from numba.core.typing import signature as nb_signature
from numba.types import CPointer, void

import rmm

import cudf
from cudf._lib.strings_udf import (
    column_from_udf_string_array,
    column_to_string_view_array,
)
from cudf.core.udf.strings_typing import (
    str_view_arg_handler,
    string_view,
    udf_string,
)
from cudf.core.udf.utils import _PTX_FILE, _get_extensionty_size
from cudf.testing._utils import assert_eq, sv_to_udf_str
from cudf.utils._numba import _CUDFNumbaConfig


def get_kernels(func, dtype, size):
    """
    Create two kernels for testing a single scalar string function.
    The first tests the function's action on a string_view object and
    the second tests the same except using a udf_string object.
    Allocates an output vector with a dtype specified by the caller.
    The returned kernels execute the input function on each data
    element of the input and write the output into the output vector.
    """

    func = cuda.jit(device=True)(func)

    if dtype == "str":
        outty = CPointer(udf_string)
    else:
        outty = numba.np.numpy_support.from_dtype(dtype)[::1]

    sig = nb_signature(void, CPointer(string_view), outty)

    @cuda.jit(sig, link=[_PTX_FILE], extensions=[str_view_arg_handler])
    def string_view_kernel(input_strings, output_col):
        id = cuda.grid(1)
        if id < size:
            st = input_strings[id]
            result = func(st)
            output_col[id] = result

    @cuda.jit(sig, link=[_PTX_FILE], extensions=[str_view_arg_handler])
    def udf_string_kernel(input_strings, output_col):
        # test the string function with a udf_string as input
        id = cuda.grid(1)
        if id < size:
            st = input_strings[id]
            st = sv_to_udf_str(st)
            result = func(st)
            output_col[id] = result

    return string_view_kernel, udf_string_kernel


def run_udf_test(data, func, dtype):
    """
    Run a test kernel on a set of input data.
    Converts the input data to a cuDF column and subsequently
    to an array of cudf::string_view objects.
    It then creates CUDA kernels using get_kernels that call the
    input function, and then assembles the result back into a
    cuDF series before comparing it with the equivalent pandas
    result.
    """
    if dtype == "str":
        output = rmm.DeviceBuffer(
            size=len(data) * _get_extensionty_size(udf_string)
        )
    else:
        dtype = np.dtype(dtype)
        output = cudf.core.column.column_empty(len(data), dtype=dtype)

    cudf_column = cudf.core.column.as_column(data)
    str_views = column_to_string_view_array(cudf_column)
    sv_kernel, udf_str_kernel = get_kernels(func, dtype, len(data))

    expect = pd.Series(data).apply(func)
    with _CUDFNumbaConfig():
        sv_kernel.forall(len(data))(str_views, output)
    if dtype == "str":
        result = column_from_udf_string_array(output)
    else:
        result = output

    got = cudf.Series(result, dtype=dtype)
    assert_eq(expect, got, check_dtype=False)
    with _CUDFNumbaConfig():
        udf_str_kernel.forall(len(data))(str_views, output)
    if dtype == "str":
        result = column_from_udf_string_array(output)
    else:
        result = output

    got = cudf.Series(result, dtype=dtype)
    assert_eq(expect, got, check_dtype=False)


@pytest.fixture(scope="module")
def data():
    return [
        "abc",
        "ABC",
        "AbC",
        "123",
        "123aBc",
        "123@.!",
        "",
        "rapids ai",
        "gpu",
        "True",
        "False",
        "1.234",
        ".123a",
        "0.013",
        "1.0",
        "01",
        "20010101",
        "cudf",
        "cuda",
        "gpu",
        "This Is A Title",
        "This is Not a Title",
        "Neither is This a Title",
        "NoT a TiTlE",
        "123 Title Works",
    ]


@pytest.fixture(params=["cudf", "cuda", "gpucudf", "abc"])
def rhs(request):
    return request.param


@pytest.fixture(params=["c", "cu", "2", "abc", "", "gpu"])
def substr(request):
    return request.param


def test_string_udf_eq(data, rhs):
    def func(st):
        return st == rhs

    run_udf_test(data, func, "bool")


def test_string_udf_ne(data, rhs):
    def func(st):
        return st != rhs

    run_udf_test(data, func, "bool")


def test_string_udf_ge(data, rhs):
    def func(st):
        return st >= rhs

    run_udf_test(data, func, "bool")


def test_string_udf_le(data, rhs):
    def func(st):
        return st <= rhs

    run_udf_test(data, func, "bool")


def test_string_udf_gt(data, rhs):
    def func(st):
        return st > rhs

    run_udf_test(data, func, "bool")


def test_string_udf_lt(data, rhs):
    def func(st):
        return st < rhs

    run_udf_test(data, func, "bool")


def test_string_udf_contains(data, substr):
    def func(st):
        return substr in st

    run_udf_test(data, func, "bool")


def test_string_udf_count(data, substr):
    def func(st):
        return st.count(substr)

    run_udf_test(data, func, "int32")


def test_string_udf_find(data, substr):
    def func(st):
        return st.find(substr)

    run_udf_test(data, func, "int32")


def test_string_udf_endswith(data, substr):
    def func(st):
        return st.endswith(substr)

    run_udf_test(data, func, "bool")


def test_string_udf_isalnum(data):
    def func(st):
        return st.isalnum()

    run_udf_test(data, func, "bool")


def test_string_udf_isalpha(data):
    def func(st):
        return st.isalpha()

    run_udf_test(data, func, "bool")


def test_string_udf_isdecimal(data):
    def func(st):
        return st.isdecimal()

    run_udf_test(data, func, "bool")


def test_string_udf_isdigit(data):
    def func(st):
        return st.isdigit()

    run_udf_test(data, func, "bool")


def test_string_udf_islower(data):
    def func(st):
        return st.islower()

    run_udf_test(data, func, "bool")


def test_string_udf_isnumeric(data):
    def func(st):
        return st.isnumeric()

    run_udf_test(data, func, "bool")


def test_string_udf_isspace(data):
    def func(st):
        return st.isspace()

    run_udf_test(data, func, "bool")


def test_string_udf_isupper(data):
    def func(st):
        return st.isupper()

    run_udf_test(data, func, "bool")


def test_string_udf_istitle(data):
    def func(st):
        return st.istitle()

    run_udf_test(data, func, "bool")


def test_string_udf_len(data):
    def func(st):
        return len(st)

    run_udf_test(data, func, "int64")


def test_string_udf_rfind(data, substr):
    def func(st):
        return st.rfind(substr)

    run_udf_test(data, func, "int32")


def test_string_udf_startswith(data, substr):
    def func(st):
        return st.startswith(substr)

    run_udf_test(data, func, "bool")


def test_string_udf_return_string(data):
    def func(st):
        return st

    run_udf_test(data, func, "str")


@pytest.mark.parametrize("strip_char", ["1", "a", "12", " ", "", ".", "@"])
def test_string_udf_strip(data, strip_char):
    def func(st):
        return st.strip(strip_char)

    run_udf_test(data, func, "str")


@pytest.mark.parametrize("strip_char", ["1", "a", "12", " ", "", ".", "@"])
def test_string_udf_lstrip(data, strip_char):
    def func(st):
        return st.lstrip(strip_char)

    run_udf_test(data, func, "str")


@pytest.mark.parametrize("strip_char", ["1", "a", "12", " ", "", ".", "@"])
def test_string_udf_rstrip(data, strip_char):
    def func(st):
        return st.rstrip(strip_char)

    run_udf_test(data, func, "str")


def test_string_udf_upper(data):
    def func(st):
        return st.upper()

    run_udf_test(data, func, "str")


def test_string_udf_lower(data):
    def func(st):
        return st.lower()

    run_udf_test(data, func, "str")


@pytest.mark.parametrize("concat_char", ["1", "a", "12", " ", "", ".", "@"])
def test_string_udf_concat(data, concat_char):
    def func(st):
        return st + concat_char

    run_udf_test(data, func, "str")


@pytest.mark.parametrize("concat_char", ["1", "a", "12", " ", "", ".", "@"])
def test_string_udf_concat_reflected(data, concat_char):
    def func(st):
        return concat_char + st

    run_udf_test(data, func, "str")


@pytest.mark.parametrize("to_replace", ["a", "1", "", "@"])
@pytest.mark.parametrize("replacement", ["a", "1", "", "@"])
def test_string_udf_replace(data, to_replace, replacement):
    def func(st):
        return st.replace(to_replace, replacement)

    run_udf_test(data, func, "str")
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_custom_accessor.py
# Copyright (c) 2020-2023, NVIDIA CORPORATION.

import pandas as pd
import pytest

import cudf as gd
from cudf.testing._utils import assert_eq


@gd.api.extensions.register_dataframe_accessor("point")
@pd.api.extensions.register_dataframe_accessor("point")
class PointsAccessor:
    def __init__(self, obj):
        self._validate(obj)
        self._obj = obj

    @staticmethod
    def _validate(obj):
        cols = obj.columns
        if not all(vertex in cols for vertex in ["x", "y"]):
            raise AttributeError("Must have vertices 'x', 'y'.")

    @property
    def bounding_box(self):
        xs, ys = self._obj["x"], self._obj["y"]
        min_x, min_y, max_x, max_y = xs.min(), ys.min(), xs.max(), ys.max()

        return (min_x, min_y, max_x, max_y)


@pytest.mark.parametrize(
    "gdf", [gd.datasets.randomdata(nrows=6, dtypes={"x": int, "y": int})]
)
def test_dataframe_accessor(gdf):
    pdf = gdf.to_pandas()

    assert_eq(gdf.point.bounding_box, pdf.point.bounding_box)


@pytest.mark.parametrize(
    "gdf1", [gd.datasets.randomdata(nrows=1, dtypes={"x": int, "y": int})]
)
@pytest.mark.parametrize(
    "gdf2", [gd.datasets.randomdata(nrows=1, dtypes={"x": int, "y": int})]
)
def test_dataframe_accessor_idendity(gdf1, gdf2):
    """Test for accessor identities
    - An object should hold a persistent reference to the same accessor
    - Different objects should hold different instances of the accessor
    """

    assert gdf1.point is gdf1.point
    assert gdf1.point is not gdf2.point


@pd.api.extensions.register_index_accessor("odd")
@pd.api.extensions.register_series_accessor("odd")
@gd.api.extensions.register_index_accessor("odd")
@gd.api.extensions.register_series_accessor("odd")
class OddRowAccessor:
    def __init__(self, obj):
        self._obj = obj

    def __getitem__(self, i):
        return self._obj[2 * i - 1]


@pytest.mark.parametrize("gidx", [gd.Index(list(range(0, 50)))])
def test_index_accessor(gidx):
    pidx = gidx.to_pandas()

    for i in range(1, 10):
        assert_eq(gidx.odd[i], pidx.odd[i])


@pytest.mark.parametrize("gs", [gd.Series(list(range(1, 50)))])
def test_series_accessor(gs):
    ps = gs.to_pandas()

    for i in range(1, 10):
        assert_eq(gs.odd[i], ps.odd[i])


@pytest.mark.parametrize(
    "gdf", [gd.datasets.randomdata(nrows=6, dtypes={"x": int, "y": int})]
)
@pytest.mark.parametrize("gidx", [gd.Index(list(range(1, 50)))])
@pytest.mark.parametrize("gs", [gd.Series(list(range(1, 50)))])
def test_accessor_space_separate(gdf, gidx, gs):
    assert not id(gdf._accessors) == id(gidx._accessors)
    assert not id(gidx._accessors) == id(gs._accessors)
    assert not id(gdf._accessors) == id(gs._accessors)
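# A minimal illustrative sketch (hypothetical helper, not part of the
# suite): once the accessor classes above are registered, every DataFrame
# exposes them as attributes, on the cudf side as well as the pandas side.
def _sketch_point_accessor_usage():
    import cudf

    df = cudf.DataFrame({"x": [0, 2, 1], "y": [3, 0, 4]})
    # bounding_box is (min_x, min_y, max_x, max_y) per PointsAccessor above
    assert df.point.bounding_box == (0, 0, 2, 4)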
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_json.py
# Copyright (c) 2018-2023, NVIDIA CORPORATION.

import copy
import gzip
import itertools
import os
from io import BytesIO, StringIO
from pathlib import Path

import numpy as np
import pandas as pd
import pyarrow as pa
import pytest

import cudf
from cudf.testing._utils import (
    DATETIME_TYPES,
    NUMERIC_TYPES,
    TIMEDELTA_TYPES,
    assert_eq,
)


def make_numeric_dataframe(nrows, dtype):
    df = pd.DataFrame()
    df["col1"] = np.arange(nrows, dtype=dtype)
    df["col2"] = np.arange(1, 1 + nrows, dtype=dtype)
    return df


@pytest.fixture(params=[0, 1, 10, 100])
def pdf(request):
    types = NUMERIC_TYPES + DATETIME_TYPES + ["bool"]
    nrows = request.param

    # Create a pandas dataframe with random data of mixed types
    test_pdf = pd.DataFrame(
        {
            f"col_{typ}": np.random.randint(0, nrows, nrows).astype(typ)
            for typ in types
        }
    )
    # Delete the name of the column index, and rename the row index
    test_pdf.columns.name = None
    test_pdf.index.name = "test_index"

    return test_pdf


@pytest.fixture
def gdf(pdf):
    return cudf.DataFrame.from_pandas(pdf)


@pytest.fixture(params=[0, 1, 10, 100])
def gdf_writer_types(request):
    # datetime64[us], datetime64[ns] are unsupported due to a bug in parser
    types = (
        NUMERIC_TYPES
        + ["datetime64[s]", "datetime64[ms]"]
        + TIMEDELTA_TYPES
        + ["bool", "str"]
    )
    typer = {"col_" + val: val for val in types}
    ncols = len(types)
    nrows = request.param

    # Create a pandas dataframe with random data of mixed types
    test_pdf = cudf.DataFrame(
        [list(range(ncols * i, ncols * (i + 1))) for i in range(nrows)],
        columns=pd.Index([f"col_{typ}" for typ in types]),
    )

    # Cast all the column dtypes to objects, rename them, and then cast to
    # appropriate types
    test_pdf = test_pdf.astype(typer)

    return test_pdf


index_params = [True, False]
compression_params = ["gzip", "bz2", "zip", "xz", None]
orient_params = ["columns", "records", "table", "split"]
params = itertools.product(index_params, compression_params, orient_params)


@pytest.fixture(params=params)
def json_files(request, tmp_path_factory, pdf):
    index, compression, orient = request.param
    if index is False and orient not in ("split", "table"):
        pytest.skip(
            "'index=False' is only valid when 'orient' is 'split' or "
            "'table'"
        )
    if index is False and orient == "table":
        pytest.skip("'index=False' isn't valid when 'orient' is 'table'")
    fname_df = tmp_path_factory.mktemp("json") / "test_df.json"
    fname_series = tmp_path_factory.mktemp("json") / "test_series.json"
    pdf.to_json(fname_df, index=index, compression=compression, orient=orient)
    pdf["col_int32"].to_json(
        fname_series, index=index, compression=compression, orient=orient
    )
    return (fname_df, fname_series, orient, compression)


@pytest.mark.filterwarnings("ignore:Strings are not yet supported")
@pytest.mark.filterwarnings("ignore:Using CPU")
def test_json_reader(json_files):
    path_df, path_series, orient, compression = json_files
    expect_df = pd.read_json(path_df, orient=orient, compression=compression)
    got_df = cudf.read_json(path_df, orient=orient, compression=compression)
    if len(expect_df) == 0:
        expect_df = expect_df.reset_index(drop=True)
        expect_df.columns = expect_df.columns.astype("object")
    if len(got_df) == 0:
        got_df = got_df.reset_index(drop=True)

    assert_eq(expect_df, got_df, check_categorical=False)

    # Only these orients are allowed for Series, but isn't enforced by Pandas
    if orient in ("split", "records", "index"):
        expect_series = pd.read_json(
            path_series, orient=orient, compression=compression, typ="series"
        )
        got_series = cudf.read_json(
            path_series, orient=orient, compression=compression, typ="series"
        )
        if len(expect_series) == 0:
            expect_series = expect_series.reset_index(drop=True)
        if len(got_df) == 0:
            got_series = got_series.reset_index(drop=True)

        assert_eq(expect_series, got_series)


@pytest.mark.filterwarnings("ignore:Can't infer compression")
@pytest.mark.filterwarnings("ignore:Using CPU")
def test_json_writer(tmpdir, pdf, gdf):
    pdf_df_fname = tmpdir.join("pdf_df.json")
    gdf_df_fname = tmpdir.join("gdf_df.json")

    pdf.to_json(pdf_df_fname)
    gdf.to_json(gdf_df_fname)

    assert os.path.exists(pdf_df_fname)
    assert os.path.exists(gdf_df_fname)

    expect_df = pd.read_json(pdf_df_fname)
    got_df = pd.read_json(gdf_df_fname)

    assert_eq(expect_df, got_df)

    for column in pdf.columns:
        pdf_series_fname = tmpdir.join(column + "_" + "pdf_series.json")
        gdf_series_fname = tmpdir.join(column + "_" + "gdf_series.json")

        pdf[column].to_json(pdf_series_fname)
        gdf[column].to_json(gdf_series_fname)

        assert os.path.exists(pdf_series_fname)
        assert os.path.exists(gdf_series_fname)

        expect_series = pd.read_json(pdf_series_fname, typ="series")
        got_series = pd.read_json(gdf_series_fname, typ="series")

        assert_eq(expect_series, got_series)

        # Make sure results align for regular strings, not just files
        pdf_string = pdf[column].to_json()
        gdf_string = gdf[column].to_json()
        assert_eq(pdf_string, gdf_string)


@pytest.mark.parametrize(
    "lines", [True, False], ids=["lines=True", "lines=False"]
)
def test_cudf_json_writer(pdf, lines):
    # removing datetime column because pandas doesn't support it
    for col_name in pdf.columns:
        if "datetime" in col_name:
            pdf.drop(col_name, axis=1, inplace=True)
    gdf = cudf.DataFrame.from_pandas(pdf)
    pdf_string = pdf.to_json(orient="records", lines=lines)
    gdf_string = gdf.to_json(orient="records", lines=lines, engine="cudf")

    assert_eq(pdf_string, gdf_string)

    gdf_string = gdf.to_json(
        orient="records", lines=lines, engine="cudf", rows_per_chunk=8
    )

    assert_eq(pdf_string, gdf_string)


def test_cudf_json_writer_read(gdf_writer_types):
    dtypes = {
        col_name: col_name[len("col_") :]
        for col_name in gdf_writer_types.columns
    }
    gdf_string = gdf_writer_types.to_json(
        orient="records", lines=True, engine="cudf"
    )
    gdf2 = cudf.read_json(
        StringIO(gdf_string),
        lines=True,
        engine="cudf",
        dtype=dict(dtypes),
    )
    pdf2 = pd.read_json(StringIO(gdf_string), lines=True, dtype=dict(dtypes))

    # Bug in pandas https://github.com/pandas-dev/pandas/issues/28558
    if pdf2.empty:
        pdf2.reset_index(drop=True, inplace=True)
        pdf2.columns = pdf2.columns.astype("object")

    assert_eq(pdf2, gdf2)


@pytest.mark.parametrize(
    "jsonl_string, expected",
    [
        # fixed width
        ("""{"a":10, "b":1.1}\n {"a":20, "b":2.1}\n""", None),
        # simple list
        ("""{"a":[1, 2, 3], "b":1.1}\n {"a":[]}\n""", None),
        # simple struct
        ("""{"a":{"c": 123 }, "b":1.1}\n {"a": {"c": 456}}\n""", None),
        # list of lists
        ("""{"a":[[], [1, 2], [3, 4]], "b":1.1}\n""", None),
        ("""{"a":[null, [1, 2], [null, 4]], "b":1.1}\n""", None),
        # list of structs
        # error
        ("""{"a":[null, {}], "b":1.1}\n""", None),
        (
            """{"a":[null, {"L": 123}], "b":1.0}\n {"b":1.1}\n {"b":2.1}\n""",
            None,
        ),
        (
            """{"a":[{"L": 123}, null], "b":1.0}\n {"b":1.1}\n {"b":2.1}\n""",
            None,
        ),
        # struct of lists
        (
            """{"a":{"L": [1, 2, 3]}, "b":1.1}\n {"a": {"L": [4, 5, 6]}}\n""",
            None,
        ),
        ("""{"a":{"L": [1, 2, null]}, "b":1.1}\n {"a": {"L": []}}\n""", None),
        # struct of structs
        (
            """{"a":{"L": {"M": 123}}, "b":1.1}
{"a": {"L": {"M": 456}}}\n""",
            None,
        ),
        (
            """{"a":{"L": {"M": null}}, "b":1.1}\n {"a": {"L": {}}}\n""",
            """{"a":{"L": {}}, "b":1.1}\n {"a": {"L": {}}}\n""",
        ),
        # list of structs of lists
        ("""{"a":[{"L": [1, 2, 3]}, {"L": [4, 5, 6]}], "b":1.1}\n""", None),
        ("""{"a":[{"L": [1, 2, null]}, {"L": []}], "b":1.1}\n""", None),
        # struct of lists of structs
        ("""{"a":{"L": [{"M": 123}, {"M": 456}]}, "b":1.1}\n""", None),
        (
            """{"a":{"L": [{"M": null}, {}]}, "b":1.1}\n""",
            """{"a":{"L": [{}, {}]}, "b":1.1}\n""",
        ),
    ],
)
def test_cudf_json_roundtrip(jsonl_string, expected):
    gdf = cudf.read_json(
        StringIO(jsonl_string),
        lines=True,
        engine="cudf",
        # dtype=dict(dtypes),
    )
    expected = jsonl_string if expected is None else expected
    gdf_string = gdf.to_json(
        orient="records", lines=True, engine="cudf", include_nulls=False
    )
    assert_eq(gdf_string, expected.replace(" ", ""))


@pytest.mark.parametrize("sink", ["string", "file"])
def test_cudf_json_writer_sinks(sink, tmp_path_factory):
    df = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    target = None
    if sink == "string":
        target = StringIO()
    elif sink == "file":
        target = tmp_path_factory.mktemp("json") / "test_df.json"
    df.to_json(target, engine="cudf")
    if sink == "string":
        assert (
            target.getvalue() == '[{"a":1,"b":4},{"a":2,"b":5},{"a":3,"b":6}]'
        )
    elif sink == "file":
        assert os.path.exists(target)
        with open(target, "r") as f:
            assert f.read() == '[{"a":1,"b":4},{"a":2,"b":5},{"a":3,"b":6}]'


@pytest.fixture(
    params=["string", "filepath", "pathobj", "bytes_io", "string_io", "url"]
)
def json_input(request, tmp_path_factory):
    input_type = request.param
    buffer = "[1, 2, 3]\n[4, 5, 6]\n[7, 8, 9]\n"
    fname = tmp_path_factory.mktemp("json") / "test_df.json"
    if not os.path.isfile(fname):
        with open(str(fname), "w") as fp:
            fp.write(buffer)

    if input_type == "string":
        return buffer
    if input_type == "filepath":
        return str(fname)
    if input_type == "pathobj":
        return Path(fname)
    if input_type == "bytes_io":
        return BytesIO(buffer.encode())
    if input_type == "string_io":
        return StringIO(buffer)
    if input_type == "url":
        return Path(fname).as_uri()


@pytest.mark.filterwarnings("ignore:Using CPU")
@pytest.mark.parametrize("engine", ["auto", "cudf", "pandas"])
def test_json_lines_basic(json_input, engine):
    cu_df = cudf.read_json(json_input, engine=engine, lines=True)
    pd_df = pd.read_json(json_input, lines=True)

    assert all(cu_df.dtypes == ["int64", "int64", "int64"])
    for cu_col, pd_col in zip(cu_df.columns, pd_df.columns):
        assert str(cu_col) == str(pd_col)
        np.testing.assert_array_equal(pd_df[pd_col], cu_df[cu_col].to_numpy())


@pytest.mark.filterwarnings("ignore:Using CPU")
@pytest.mark.parametrize("engine", ["auto", "cudf"])
def test_json_lines_multiple(tmpdir, json_input, engine):
    tmp_file1 = tmpdir.join("MultiInputs1.json")
    tmp_file2 = tmpdir.join("MultiInputs2.json")

    pdf = pd.read_json(json_input, lines=True)
    pdf.to_json(tmp_file1, compression="infer", lines=True, orient="records")
    pdf.to_json(tmp_file2, compression="infer", lines=True, orient="records")

    cu_df = cudf.read_json([tmp_file1, tmp_file2], engine=engine, lines=True)
    pd_df = pd.concat([pdf, pdf])

    assert all(cu_df.dtypes == ["int64", "int64", "int64"])
    for cu_col, pd_col in zip(cu_df.columns, pd_df.columns):
        assert str(cu_col) == str(pd_col)
        np.testing.assert_array_equal(pd_df[pd_col], cu_df[cu_col].to_numpy())


@pytest.mark.parametrize("engine", ["auto", "cudf"])
def test_json_read_directory(tmpdir, json_input, engine):
    pdf = pd.read_json(json_input, lines=True)
    pdf.to_json(
        tmpdir.join("MultiInputs1.json"),
        compression="infer",
        lines=True,
        orient="records",
    )
    pdf.to_json(
        tmpdir.join("MultiInputs2.json"),
        compression="infer",
        lines=True,
        orient="records",
    )
    pdf.to_json(
        tmpdir.join("MultiInputs3.json"),
        compression="infer",
        lines=True,
        orient="records",
    )

    cu_df = cudf.read_json(tmpdir, engine=engine, lines=True)
    pd_df = pd.concat([pdf, pdf, pdf])

    assert all(cu_df.dtypes == ["int64", "int64", "int64"])
    for cu_col, pd_col in zip(cu_df.columns, pd_df.columns):
        assert str(cu_col) == str(pd_col)
        np.testing.assert_array_equal(pd_df[pd_col], cu_df[cu_col].to_numpy())


def test_json_lines_byte_range(json_input):
    # include the first row and half of the second row
    # should parse the first two rows
    df = cudf.read_json(
        copy.deepcopy(json_input), lines=True, byte_range=(0, 15)
    )
    assert df.shape == (2, 3)

    # include half of the second row and half of the third row
    # should parse only the third row
    df = cudf.read_json(
        copy.deepcopy(json_input), lines=True, byte_range=(15, 10)
    )
    assert df.shape == (1, 3)

    # include half of the second row and entire third row
    # should parse only the third row
    df = cudf.read_json(
        copy.deepcopy(json_input), lines=True, byte_range=(15, 0)
    )
    assert df.shape == (1, 3)

    # include half of the second row till past the end of the file
    # should parse only the third row
    df = cudf.read_json(
        copy.deepcopy(json_input), lines=True, byte_range=(10, 50)
    )
    assert df.shape == (1, 3)
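# A minimal illustrative sketch (hypothetical helper, not part of the
# suite): the byte_range checks above rely on each JSON-lines record being
# parsed by exactly one reader -- the one whose byte range contains the
# record's first byte -- so adjacent non-overlapping (offset, size) ranges
# can be read independently and reassembled without duplicates or gaps.
def _sketch_byte_range_chunks(json_lines_text, chunk_size):
    chunks = [
        cudf.read_json(
            StringIO(json_lines_text),
            lines=True,
            byte_range=(offset, chunk_size),
        )
        for offset in range(0, len(json_lines_text), chunk_size)
    ]
    return cudf.concat(chunks, ignore_index=True)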
) @pytest.mark.parametrize( "buffer", [ "[1.0,]\n[null, ]", '{"0":1.0,"1":}\n{"0":null,"1": }', '{ "0" : 1.0 , "1" : }\n{ "0" : null , "1" : }', '{"0":1.0}\n{"1":}', ], ) def test_json_null_literal(buffer): df = cudf.read_json(buffer, lines=True, engine="cudf_legacy") # first column contains a null field, type should be set to float # second column contains only empty fields, type should be set to int8 np.testing.assert_array_equal(df.dtypes, ["float64", "int8"]) np.testing.assert_array_equal( df["0"].to_numpy(na_value=np.nan), [1.0, np.nan] ) np.testing.assert_array_equal(df["1"].to_numpy(na_value=0), [0, 0]) def test_json_bad_protocol_string(): test_string = '{"field": "s3://path"}' expect = pd.DataFrame([{"field": "s3://path"}]) got = cudf.read_json(test_string, lines=True) assert_eq(expect, got) def test_json_corner_case_with_escape_and_double_quote_char_with_pandas( tmpdir, ): fname = tmpdir.mkdir("gdf_json").join("tmp_json_escape_double_quote") pdf = pd.DataFrame( { "a": ['ab"cd', "\\\b", "\r\\", "'"], "b": ["a\tb\t", "\\", '\\"', "\t"], "c": ["aeiou", "try", "json", "cudf"], } ) pdf.to_json(fname, compression="infer", lines=True, orient="records") df = cudf.read_json( fname, compression="infer", lines=True, orient="records" ) pdf = pd.read_json( fname, compression="infer", lines=True, orient="records" ) assert_eq(cudf.DataFrame(pdf), df) def test_json_corner_case_with_escape_and_double_quote_char_with_strings(): str_buffer = StringIO( """{"a":"ab\\"cd","b":"a\\tb\\t","c":"aeiou"} {"a":"\\\\\\b","b":"\\\\","c":"try"} {"a":"\\r\\\\","b":"\\\\\\"","c":"json"} {"a":"\'","b":"\\t","c":"cudf"}""" ) df = cudf.read_json( str_buffer, compression="infer", lines=True, orient="records" ) expected = { "a": ['ab"cd', "\\\b", "\r\\", "'"], "b": ["a\tb\t", "\\", '\\"', "\t"], "c": ["aeiou", "try", "json", "cudf"], } num_rows = df.shape[0] for col_name in df._data: for i in range(num_rows): assert expected[col_name][i] == df[col_name][i] def test_json_to_json_special_characters(): df = cudf.DataFrame( { "'a'": ['ab"cd', "\\\b", "\r\\", "'"], "b": ["a\tb\t", "\\", '\\"', "\t"], "c": ["aeiou", "try", "json", "cudf"], } ) actual = StringIO() df.to_json(actual, engine="cudf", lines=True, orient="records") expected = StringIO() df.to_pandas().to_json(expected, lines=True, orient="records") assert expected.getvalue() == actual.getvalue() @pytest.mark.parametrize( "gdf,pdf", [ ( cudf.DataFrame( { "int col": cudf.Series( [1, 2, None, 2, 2323, 234, None], dtype="int64" ) } ), pd.DataFrame( { "int col": pd.Series( [1, 2, None, 2, 2323, 234, None], dtype=pd.Int64Dtype() ) } ), ), ( cudf.DataFrame( { "int64 col": cudf.Series( [1, 2, None, 2323, None], dtype="int64" ), "string col": cudf.Series( ["abc", "a", None, "", None], dtype="str" ), "float col": cudf.Series( [0.234, None, 234234.2343, None, 0.0], dtype="float64" ), "bool col": cudf.Series( [None, True, False, None, True], dtype="bool" ), "categorical col": cudf.Series( [1, 2, 1, None, 2], dtype="category" ), "datetime col": cudf.Series( [1231233, None, 2323234, None, 1], dtype="datetime64[ns]", ), "timedelta col": cudf.Series( [None, 34687236, 2323234, 1, None], dtype="timedelta64[ns]", ), } ), pd.DataFrame( { "int64 col": pd.Series( [1, 2, None, 2323, None], dtype=pd.Int64Dtype() ), "string col": pd.Series( ["abc", "a", None, "", None], dtype=pd.StringDtype() ), "float col": pd.Series( [0.234, None, 234234.2343, None, 0.0], dtype="float64" ), "bool col": pd.Series( [None, True, False, None, True], dtype=pd.BooleanDtype(), ), "categorical col": pd.Series( 
[1, 2, 1, None, 2], dtype="category" ), "datetime col": pd.Series( [1231233, None, 2323234, None, 1], dtype="datetime64[ns]", ), "timedelta col": pd.Series( [None, 34687236, 2323234, 1, None], dtype="timedelta64[ns]", ), } ), ), ], ) def test_json_to_json_compare_contents(gdf, pdf): expected_json = pdf.to_json(lines=True, orient="records") with pytest.warns(UserWarning): actual_json = gdf.to_json(lines=True, orient="records") assert expected_json == actual_json @pytest.mark.filterwarnings("ignore:Using CPU") @pytest.mark.parametrize("engine", ["cudf", "pandas"]) def test_default_integer_bitwidth(default_integer_bitwidth, engine): buf = BytesIO() pd.DataFrame({"a": range(10)}).to_json(buf, lines=True, orient="records") buf.seek(0) df = cudf.read_json(buf, engine=engine, lines=True, orient="records") assert df["a"].dtype == np.dtype(f"i{default_integer_bitwidth//8}") @pytest.mark.filterwarnings("ignore:Using CPU") @pytest.mark.parametrize( "engine", [ pytest.param( "cudf_legacy", marks=pytest.mark.skip( reason="cannot partially set dtypes for cudf json engine" ), ), "pandas", "cudf", ], ) def test_default_integer_bitwidth_partial(default_integer_bitwidth, engine): buf = BytesIO() pd.DataFrame({"a": range(10), "b": range(10, 20)}).to_json( buf, lines=True, orient="records" ) buf.seek(0) df = cudf.read_json( buf, engine=engine, lines=True, orient="records", dtype={"b": "i8"} ) assert df["a"].dtype == np.dtype(f"i{default_integer_bitwidth//8}") assert df["b"].dtype == np.dtype("i8") @pytest.mark.filterwarnings("ignore:Using CPU") @pytest.mark.parametrize("engine", ["cudf", "pandas"]) def test_default_integer_bitwidth_extremes(default_integer_bitwidth, engine): # Test that integer columns in json are _inferred_ as 32 bit columns. buf = StringIO( '{"u8":18446744073709551615, "i8":9223372036854775807}\n' '{"u8": 0, "i8": -9223372036854775808}' ) df = cudf.read_json(buf, engine=engine, lines=True, orient="records") assert df["u8"].dtype == np.dtype(f"u{default_integer_bitwidth//8}") assert df["i8"].dtype == np.dtype(f"i{default_integer_bitwidth//8}") def test_default_float_bitwidth(default_float_bitwidth): # Test that float columns in json are _inferred_ as 32 bit columns. 
df = cudf.read_json( '{"a": 1.0, "b": 2.5}\n{"a": 3.5, "b": 4.0}', engine="cudf", lines=True, orient="records", ) assert df["a"].dtype == np.dtype(f"f{default_float_bitwidth//8}") assert df["b"].dtype == np.dtype(f"f{default_float_bitwidth//8}") def test_json_nested_basic(): bytes_obj = BytesIO() data = { "c1": [{"f1": "sf11", "f2": "sf21"}, {"f1": "sf12", "f2": "sf22"}], "c2": [["l11", "l21"], ["l12", "l22"]], } pdf = pd.DataFrame(data) pdf.to_json(bytes_obj, orient="records") df = cudf.read_json(bytes_obj, engine="cudf", orient="records") bytes_obj.seek(0) pdf = pd.read_json(bytes_obj, orient="records") assert_eq(pdf, df) @pytest.mark.parametrize( "data", [ { "c1": [{"f1": "sf11", "f2": "sf21"}, {"f1": "sf12", "f2": "sf22"}], "c2": [["l11", "l21"], ["l12", "l22"]], }, # Essential test case to handle omissions { "c1": [{"f2": "sf21"}, {"f1": "sf12"}], "c2": [["l11", "l21"], []], }, # empty input {}, ], ) @pytest.mark.parametrize("lines", [True, False]) def test_json_nested_lines(data, lines): bytes = BytesIO() pdf = pd.DataFrame(data) pdf.to_json(bytes, orient="records", lines=lines) bytes.seek(0) df = cudf.read_json(bytes, engine="cudf", orient="records", lines=lines) bytes.seek(0) pdf = pd.read_json(bytes, orient="records", lines=lines) # In the second test-case we need to take a detour via pyarrow # Pandas omits "f1" in first row, so we have to enforce a common schema, # such that pandas would have the f1 member with null # Also, pyarrow chooses to select different ordering of a nested column # children though key-value pairs are correct. pa_table_pdf = pa.Table.from_pandas( pdf, schema=df.to_arrow().schema, safe=False ) assert df.to_arrow().equals(pa_table_pdf) def test_json_nested_data(): json_str = ( '[{"0":{},"2":{}},{"1":[[""],[]],"2":{"2":""}},' '{"0":{"a":"1"},"2":{"0":"W&RR=+I","1":""}}]' ) df = cudf.read_json(StringIO(json_str), engine="cudf", orient="records") pdf = pd.read_json(StringIO(json_str), orient="records") pdf.columns = pdf.columns.astype("str") pa_table_pdf = pa.Table.from_pandas( pdf, schema=df.to_arrow().schema, safe=False ) assert df.to_arrow().equals(pa_table_pdf) def test_json_empty_types(): json_str = """ {} {"a": [], "b": {}} {"a": []} {"b": {}} {"c": {"d": []}} {"e": [{}]} """ df = cudf.read_json(StringIO(json_str), orient="records", lines=True) pdf = pd.read_json(StringIO(json_str), orient="records", lines=True) assert_eq(df, pdf) def test_json_types_data(): # 0:<0:string,1:float> # 1:list<int> # 2:<0:bool> json_str = ( '[{"0":null,"2":{}},' '{"1":[123],"0":{"0":"foo","1":123.4},"2":{"0":false}},' '{"0":{},"1":[],"2":{"0":null}}]' ) df = cudf.read_json(StringIO(json_str), engine="cudf", orient="records") pdf = pd.read_json(StringIO(json_str), orient="records") pdf.columns = pdf.columns.astype("str") pa_table_pdf = pa.Table.from_pandas( pdf, schema=df.to_arrow().schema, safe=False ) assert df.to_arrow().equals(pa_table_pdf) @pytest.mark.parametrize( "col_type,json_str,expected_data", [ # without quotes ("int", '[{"k": 1}, {"k": 2}, {"k": 3}, {"k": 4}]', [1, 2, 3, 4]), # with quotes ("int", '[{"k": "1"}, {"k": "2"}]', [1, 2]), # with quotes, mixed ("int", '[{"k": "1"}, {"k": "2"}, {"k": 3}, {"k": 4}]', [1, 2, 3, 4]), # with quotes, null, mixed ( "int", '[{"k": "1"}, {"k": "2"}, {"k": null}, {"k": 4}]', [1, 2, None, 4], ), # without quotes, null ( "int", '[{"k": 1}, {"k": 2}, {"k": null}, {"k": 4}]', [1, 2, None, 4], ), # without quotes ("float", '[{"k": 1}, {"k": 2}, {"k": 3}, {"k": 4}]', [1, 2, 3, 4]), # with quotes ("float", '[{"k": "1"}, {"k": "2"}]', [1, 
2]), # with quotes, mixed ( "float", '[{"k": "1"}, {"k": "2"}, {"k": 3}, {"k": 4}]', [1, 2, 3, 4], ), # with quotes, null, mixed ( "float", '[{"k": "1"}, {"k": "2"}, {"k": null}, {"k": 4}]', [1, 2, None, 4], ), # with quotes, NAN ( "float", '[{"k": "1"}, {"k": "2"}, {"k": NaN}, {"k": "4"}]', [1, 2, np.nan, 4], ), # without quotes ("str", '[{"k": 1}, {"k": 2}, {"k": 3}, {"k": 4}]', [1, 2, 3, 4]), # with quotes ("str", '[{"k": "1"}, {"k": "2"}]', [1, 2]), # with quotes, mixed ("str", '[{"k": "1"}, {"k": "2"}, {"k": 3}, {"k": 4}]', [1, 2, 3, 4]), # with quotes, null, mixed ( "str", '[{"k": "1"}, {"k": "2"}, {"k": null}, {"k": 4}]', [1, 2, None, 4], ), # without quotes, null ( "str", '[{"k": 1}, {"k": 2}, {"k": null}, {"k": 4}]', [1, 2, None, 4], ), ], ) def test_json_quoted_values_with_schema(col_type, json_str, expected_data): actual = cudf.read_json( StringIO(json_str), engine="cudf", orient="records", dtype={"k": col_type}, ) expected = cudf.DataFrame({"k": expected_data}, dtype=col_type) assert_eq(actual, expected) @pytest.mark.parametrize( "col_type,json_str,expected_data", [ # with quotes, mixed ("int", '[{"k": "1"}, {"k": "2"}, {"k": 3}, {"k": 4}]', [1, 2, 3, 4]), # with quotes, null, mixed ( "int", '[{"k": "1"}, {"k": "2"}, {"k": null}, {"k": 4}]', [1, 2, None, 4], ), # with quotes, mixed ( "str", '[{"k": "1"}, {"k": "2"}, {"k": 3}, {"k": 4}]', ["1", "2", "3", "4"], ), # with quotes, null, mixed ( "str", '[{"k": "1"}, {"k": "2"}, {"k": null}, {"k": 4}]', ["1", "2", None, "4"], ), ], ) def test_json_quoted_values(col_type, json_str, expected_data): actual = cudf.read_json( StringIO(json_str), engine="cudf", orient="records", dtype={"k": col_type}, ) expected = cudf.DataFrame({"k": expected_data}, dtype=col_type) assert_eq(expected, actual) assert_eq(expected_data, actual.k.to_arrow().to_pylist()) @pytest.mark.parametrize( "keep_quotes,result", [ ( True, { "c1": [ {"f1": '"sf11"', "f2": '"sf21"'}, {"f1": '"sf12"', "f2": '"sf22"'}, ], "c2": [['"l11"', '"l21"'], ['"l12"', '"l22"']], }, ), ( False, { "c1": [ {"f1": "sf11", "f2": "sf21"}, {"f1": "sf12", "f2": "sf22"}, ], "c2": [["l11", "l21"], ["l12", "l22"]], }, ), ], ) def test_json_keep_quotes(keep_quotes, result): bytes_file = BytesIO() data = { "c1": [{"f1": "sf11", "f2": "sf21"}, {"f1": "sf12", "f2": "sf22"}], "c2": [["l11", "l21"], ["l12", "l22"]], } pdf = pd.DataFrame(data) pdf.to_json(bytes_file, orient="records", lines=True) actual = cudf.read_json( bytes_file, orient="records", lines=True, keep_quotes=keep_quotes, ) expected = pd.DataFrame(result) assert_eq(actual, expected) def test_json_dtypes_nested_data(): # a: StructDtype({'a': StructDtype({'b': dtype('float64')}), # 'b': dtype('int64')}) # b: ListDtype(ListDtype(float64)) actual_json_str = ( '{"a":{"a":{"b":10.0},"b":11},"b":[[10.0,1.1],[12.0,23.0]]}\n' '{"a":{"a":{"b":107.0},"b":5},"b":[[10.0,11.2],[12.0,0.23]]}\n' '{"a":{"a":{"b":50.7},"b":2},"b":[[10.0,11.3],[12.0,2.3]]}\n' '{"a":{"a":{"b":1.2},"b":67},"b":[[6.0,7.0]]}\n' '{"a":{"a":{"b":40.1},"b":1090},"b":null}\n' ) """ In [3]: df Out[3]: a b 0 {'a': {'b': 10.0}, 'b': 11} [[10.0, 1.1], [12.0, 23.0]] 1 {'a': {'b': 107.0}, 'b': 5} [[10.0, 11.2], [12.0, 0.23]] 2 {'a': {'b': 50.7}, 'b': 2} [[10.0, 11.3], [12.0, 2.3]] 3 {'a': {'b': 1.2}, 'b': 67} [[6.0, 7.0]] 4 {'a': {'b': 40.1}, 'b': 1090} None """ # a: StructDtype({'a': StructDtype({'b': dtype('int64')}), # 'b': dtype('float64')}) # b: ListDtype(ListDtype(int64)) expected_json_str = ( '{"a":{"a":{"b":10},"b":11.0},"b":[[10,1],[12,23]]}\n' 
'{"a":{"a":{"b":107},"b":5.0},"b":[[10,11],[12,0]]}\n' '{"a":{"a":{"b":50},"b":2.0},"b":[[10,11],[12,2]]}\n' '{"a":{"a":{"b":1},"b":67.0},"b":[[6,7]]}\n' '{"a":{"a":{"b":40},"b":1090.0},"b":null}\n' ) """ In [7]: df Out[7]: a b 0 {'a': {'b': 10}, 'b': 11.0} [[10, 1], [12, 23]] 1 {'a': {'b': 107}, 'b': 5.0} [[10, 11], [12, 0]] 2 {'a': {'b': 50}, 'b': 2.0} [[10, 11], [12, 2]] 3 {'a': {'b': 1}, 'b': 67.0} [[6, 7]] 4 {'a': {'b': 40}, 'b': 1090.0} None """ df = cudf.read_json( StringIO(actual_json_str), engine="cudf", orient="records", lines=True, dtype={ "a": cudf.StructDtype( { "a": cudf.StructDtype({"b": cudf.dtype("int64")}), "b": cudf.dtype("float64"), } ), "b": cudf.ListDtype(cudf.ListDtype("int64")), }, ) pdf = pd.read_json( StringIO(expected_json_str), orient="records", lines=True ) pdf.columns = pdf.columns.astype("str") pa_table_pdf = pa.Table.from_pandas( pdf, schema=df.to_arrow().schema, safe=False ) assert df.to_arrow().equals(pa_table_pdf) @pytest.mark.parametrize( "tag, data", [ ( "normal", """\ {"a": 1, "b": 2} {"a": 3, "b": 4}""", ), ( "multiple", """\ { "a": { "y" : 6}, "b" : [1, 2, 3], "c": 11 } { "a": { "y" : 6}, "b" : [4, 5 ], "c": 12 } { "a": { "y" : 6}, "b" : [6 ], "c": 13 } { "a": { "y" : 6}, "b" : [7 ], "c": 14 }""", ), ( "reordered", """\ { "a": { "y" : 6}, "b" : [1, 2, 3], "c": 11 } { "a": { "y" : 6}, "c": 12 , "b" : [4, 5 ]} { "b" : [6 ], "a": { "y" : 6}, "c": 13} { "c" : 14, "a": { "y" : 6}, "b" : [7 ]} """, ), ( "missing", """ { "a": { "y" : 6}, "b" : [1, 2, 3], "c": 11 } { "a": { "y" : 6}, "b" : [4, 5 ] } { "a": { "y" : 6}, "c": 13 } { "a": { "y" : 6}, "b" : [7 ], "c": 14 } """, ), pytest.param( "dtype_mismatch", """\ { "a": { "y" : 6}, "b" : [1, 2, 3], "c": 11 } { "a": { "y" : 6}, "b" : [4, 5 ], "c": 12 } { "a": { "y" : 6}, "b" : [6 ], "c": 13 } { "a": { "y" : 6}, "b" : [7 ], "c": 14.0 }""", ), ], ) class TestNestedJsonReaderCommon: @pytest.mark.parametrize("chunk_size", [10, 100, 1024, 1024 * 1024]) def test_chunked_nested_json_reader(self, tag, data, chunk_size): expected = cudf.read_json(StringIO(data), lines=True) source_size = len(data) chunks = [] for chunk_start in range(0, source_size, chunk_size): chunks.append( cudf.read_json( StringIO(data), byte_range=[chunk_start, chunk_size], lines=True, ) ) df = cudf.concat(chunks, ignore_index=True) assert expected.to_arrow().equals(df.to_arrow()) def test_order_nested_json_reader(self, tag, data): expected = pd.read_json(StringIO(data), lines=True) target = cudf.read_json(StringIO(data), lines=True) if tag == "dtype_mismatch": with pytest.raises(AssertionError): # pandas parses integer values in float representation # as integer assert pa.Table.from_pandas(expected).equals(target.to_arrow()) elif tag == "missing": with pytest.raises(AssertionError): # pandas inferences integer with nulls as float64 assert pa.Table.from_pandas(expected).equals(target.to_arrow()) else: assert pa.Table.from_pandas(expected).equals(target.to_arrow()) def test_json_round_trip_gzip(): df = cudf.DataFrame({"a": [1, 2, 3], "b": ["abc", "def", "ghi"]}) bytes = BytesIO() with gzip.open(bytes, mode="wb") as fo: with pytest.warns(UserWarning): df.to_json(fo, orient="records", lines=True) bytes.seek(0) with gzip.open(bytes, mode="rb") as fo: written_df = cudf.read_json(fo, orient="records", lines=True) assert_eq(written_df, df) # Testing writing from middle of the file. 
    loc = buf.tell()
    with gzip.open(buf, mode="wb") as fo:
        fo.seek(loc)
        with pytest.warns(UserWarning):
            df.to_json(fo, orient="records", lines=True)
    buf.seek(loc)
    with gzip.open(buf, mode="rb") as fo:
        fo.seek(loc)
        written_df = cudf.read_json(fo, orient="records", lines=True)
    assert_eq(written_df, df)


@pytest.mark.parametrize(
    "data",
    [
        # empty input
        # assert failing due to missing index size information
        "",
        "[]",
        "[]\n[]\n[]",
        # simple values
        """[1]\n[2]\n[3]""",
        """[1, 2, 3]\n[4, 5, 6]\n[7, 8, 9]""",
        # nulls
        """[1, 2, 3]\n[4, 5, null]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, null]\n[7, 8, 9]\n[null, null, null]""",
        """[1, 2, 3]\n[4, 5, null]\n[]""",
        # missing
        """[1, 2, 3]\n[4, 5 ]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, 6]\n[7, 8, 9, 10]""",
        """[1, 2, 3]\n[4, 5, 6, {}]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, 6, []]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, 6, {"a": 10}]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, 6, [10]]\n[7, 8, 9]""",
        # mixed
        """[1, 2, 3]\n[4, 5, {}]\n[7, 8, 9]""",
        """[1, 2, {}]\n[4, 5, 6]\n[7, 8, 9]""",
        """[1, 2, 3]\n[4, 5, [6]]\n[7, 8, 9]""",
        """[1, 2, [3]]\n[4, 5, 6]\n[7, 8, 9]""",
        # nested
        """[1, 2, [3]]\n[4, 5, [6]]\n[7, 8, [9]]""",
        """[1, 2, {"a": 3}]\n[4, 5, {"b": 6}]\n[7, 8, {"c": 9}]""",
        """[1, 2, [{"a": 3}, {"a": 3}]]
        [4, 5, [{"b": 6}, {"b": 6}, {}, {"b": 6}]]
        [7, 8, [{}]]""",
        """[1, 2, {"a": [3, 3, 3]}]
        [4, 5, {"b": [6, 6]}]
        [7, 8, {"c": 9}]""",
        """[1, 2, [{"a": 3}, {"a": null}]]
        [4, 5, [{"b": [6.0, 6, 06]}, {"b": [6]}, {}, {"b": null}]]
        [7, 8, [{}]]""",
    ],
)
@pytest.mark.parametrize("lines", [True, False])
def test_json_array_of_arrays(data, lines):
    data = data if lines else "[" + data.replace("\n", ",") + "]"
    pdf = pd.read_json(data, orient="values", lines=lines)
    df = cudf.read_json(
        StringIO(data),
        engine="cudf",
        orient="values",
        lines=lines,
    )
    # If a column mixes dict/list values with other types, replace the
    # other values with None.
    if 2 in pdf.columns and any(
        pdf[2].apply(lambda x: isinstance(x, (dict, list)))
    ):
        pdf[2] = pdf[2].apply(
            lambda x: x if isinstance(x, (dict, list)) else None
        )
    # TODO: Replace string column names with integer column names
    # for values orient in cudf json reader
    pdf.rename(
        columns={name: str(name) for name in pdf.columns}, inplace=True
    )
    # assert_eq(pdf, df)
    pa_table_pdf = pa.Table.from_pandas(
        pdf, schema=df.to_arrow().schema, safe=False
    )
    assert df.to_arrow().equals(pa_table_pdf)


@pytest.mark.parametrize(
    "jsonl_string",
    [
        # simple list with mixed types
        """{"a":[123, {}], "b":1.1}""",
        """{"a":[123, {"0": 123}], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":[{"L": 123}, 123], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":[123, {"0": 123}, 12.3], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":[123, {"0": 123}, null], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":["123", {"0": 123}], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":[{"0": 123}, "123"], "b":1.0}\n {"b":1.1}\n {"b":2.1}""",
        """{"a":["123", {"0": 123}, "123"], "b":1.0}\n {"b":1.1}""",
        """{"a":[123]}\n {"a":[{"0": 123}], "b":1.0}\n {"b":1.1}""",
        """{"a":[{"0": 123}]}\n {"a":[123], "b":1.0}\n {"b":1.1}""",
        """{"a":[{"0": 123}]}\n {"a": []}\n {"a":[123], "b":1.0}\n{"b":1.1}""",
        """{"b":1.0, "a":[{"0": 123}]}\n {"a":[123]}\n {"b":1.1}\n{"a": []}""",
        """{"a": []}\n {"a":[{"0": 123}]}\n {"a":[123], "b":1.0}\n{"b":1.1}""",
        """{"a": []}\n {"a":[123], "b":1.0}\n {"a":[{"0": 123}]}\n{"b":1.1}""",
        # nested list with mixed types
        """{"a":[123, [{"0": 123}, {}]], "b":1.0}
        {"b":1.1}
        {"a":[]}
        {"a":[123]}
        {"a":[[123], []]}""",
        """{"a":[], "b":1.0}
        {"a":[[[456]]]}
        {"a":[[123]]}
        {"a":[123]}""",
        """{"a":[123], "b":1.0}
        {"b":1.1}
        {"b":2.1}
        {"a":[[[[[[]]]]]]}""",
        """{"a":[123], "b":1.0}
        {"a":[[[[[[]]]]]]}
        {"a":[[[[[[]]]]], [[[[[]]]]]]}
        {"a":[[[[[[]]]], [[[[]]]]]]}
        {"a":[[[[[[]]], [[[]]]]]]}
        {"a":[[[[[[]], [[]]]]]]}
        {"a":[[[[[[], 123, []]]]]]}""",
        # mixed elements in multiple columns
        """{"a":[123, {"0": 123}], "b":1.0}
        {"c": ["abc"], "b":1.1}
        {"c": ["abc", []] }""",
    ],
)
def test_json_nested_mixed_types_in_list(jsonl_string):
    # Utility functions for this test: replace a list element with None when
    # the list mixes dict and non-dict values (None entries are ignored).
    def _replace_in_list(list_to_replace, replace_items):
        return [
            _replace_in_list(x, replace_items)
            if isinstance(x, list)
            else None
            if x in replace_items
            else x
            for x in list_to_replace
        ]

    def _replace_with_nulls(df, replace_items):
        for col in df.columns:
            if df[col].dtype == "object":
                df[col] = df[col].apply(
                    lambda x: _replace_in_list(x, replace_items)
                    if isinstance(x, list)
                    else x
                )
        return df

    # Both JSON Lines and the equivalent plain JSON string are tested.
    json_string = "[" + jsonl_string.replace("\n", ",") + "]"
    pdf = pd.read_json(jsonl_string, orient="records", lines=True)
    pdf2 = pd.read_json(json_string, orient="records", lines=False)
    assert_eq(pdf, pdf2)
    # In the test cases above, scalar items are mixed with dict/list items
    # inside lists, so replace the scalars with None before comparing.
pdf = _replace_with_nulls(pdf, [123, "123", 12.3, "abc"]) gdf = cudf.read_json( StringIO(jsonl_string), orient="records", lines=True, ) gdf2 = cudf.read_json( StringIO(json_string), engine="cudf", orient="records", lines=False, ) if """[{"0": 123}, {}]""" not in jsonl_string: # {} in pandas is represented as {"0": None} in cudf assert_eq(gdf, pdf) assert_eq(gdf2, pdf) pa_table_pdf = pa.Table.from_pandas( pdf, schema=gdf.to_arrow().schema, safe=False ) assert gdf.to_arrow().equals(pa_table_pdf) assert gdf2.to_arrow().equals(pa_table_pdf) @pytest.mark.parametrize( "jsonl_string", [ # mixed type in list (in different order) """{"a":[[{"0": 123}, {}], {"1": 321}], "b":1.0}""", """{"a":[{"1": 321}, [{"0": 123}, {}], ], "b":1.0}""", """{"a":[123, [{"0": 123}, {}], {"1": 321}], "b":1.0}""", """{"a":[null, [{"0": 123}, {}], {"1": 321}], "b":1.0}""", # mixed type in struct (in different order) """{"a": {"b": {"0": 123}, "c": {"1": 321}}, "d":1.0} {"a": {"b": {"0": 123}, "c": [123, 123]}, "d":1.0}""", """{"a": {"b": {"0": 123}, "c": [123, 123]}, "d":1.0} {"a": {"b": {"0": 123}, "c": {"1": 321}}, "d":1.0}""", """{"a": {"b": {"0": 123}, "c": null}, "d":1.0} {"a": {"b": {"0": 123}, "c": {"1": 321}}, "d":1.0} {"a": {"b": {"0": 123}, "c": [123, 123]}, "d":1.0}""", """{"a": {"b": {"0": 123}, "c": 123}, "d":1.0} {"a": {"b": {"0": 123}, "c": {"1": 321}}, "d":1.0} {"a": {"b": {"0": 123}, "c": [123, 123]}, "d":1.0}""", ], ) def test_json_nested_mixed_types_error(jsonl_string): # mixing list and struct should raise an exception with pytest.raises(RuntimeError): cudf.read_json( StringIO(jsonl_string), orient="records", lines=True, )
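

# A minimal usage sketch, added here for illustration rather than as part of
# the original suite. It condenses what the parametrized tests above
# exercise: with an explicit dtype schema, the cudf engine coerces quoted
# scalars and JSON nulls to the requested type. The payload and the column
# name "k" are illustrative values chosen for this sketch.
def test_json_quoted_values_sketch():
    json_str = '[{"k": "1"}, {"k": 2}, {"k": null}]'
    df = cudf.read_json(
        StringIO(json_str),
        engine="cudf",
        orient="records",
        dtype={"k": "int"},
    )
    # Quoted "1", unquoted 2, and null all land in one int64 column.
    assert df["k"].to_arrow().to_pylist() == [1, 2, None]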
0
rapidsai_public_repos/cudf/python/cudf/cudf
rapidsai_public_repos/cudf/python/cudf/cudf/tests/test_numpy_interop.py
# Copyright (c) 2019-2022, NVIDIA CORPORATION.

import numpy as np
import pytest

from cudf import DataFrame, Series
from cudf.testing._utils import assert_eq


def test_to_records_noindex():
    df = DataFrame()
    df["a"] = aa = np.arange(10, dtype=np.int32)
    df["b"] = bb = np.arange(10, 20, dtype=np.float64)

    rec = df.to_records(index=False)
    assert rec.dtype.names == ("a", "b")
    np.testing.assert_array_equal(rec["a"], aa)
    np.testing.assert_array_equal(rec["b"], bb)


def test_to_records_withindex():
    df = DataFrame()
    df["a"] = aa = np.arange(10, dtype=np.int32)
    df["b"] = bb = np.arange(10, 20, dtype=np.float64)

    rec_indexed = df.to_records(index=True)
    assert rec_indexed.size == len(aa)
    assert rec_indexed.dtype.names == ("index", "a", "b")
    np.testing.assert_array_equal(rec_indexed["a"], aa)
    np.testing.assert_array_equal(rec_indexed["b"], bb)
    np.testing.assert_array_equal(rec_indexed["index"], np.arange(10))


@pytest.mark.parametrize("columns", [None, ("a", "b"), ("a",), ("b",)])
def test_from_records_noindex(columns):
    recdtype = np.dtype([("a", np.int32), ("b", np.float64)])
    rec = np.recarray(10, dtype=recdtype)
    rec.a = aa = np.arange(10, dtype=np.int32)
    rec.b = bb = np.arange(10, 20, dtype=np.float64)
    df = DataFrame.from_records(rec, columns=columns)

    if columns and "a" in columns:
        assert_eq(aa, df["a"].values)
    if columns and "b" in columns:
        assert_eq(bb, df["b"].values)
    assert_eq(np.arange(10), df.index.values)


@pytest.mark.parametrize("columns", [None, ("a", "b"), ("a",), ("b",)])
def test_from_records_withindex(columns):
    recdtype = np.dtype(
        [("index", np.int64), ("a", np.int32), ("b", np.float64)]
    )
    rec = np.recarray(10, dtype=recdtype)
    rec.index = ii = np.arange(30, 40)
    rec.a = aa = np.arange(10, dtype=np.int32)
    rec.b = bb = np.arange(10, 20, dtype=np.float64)
    df = DataFrame.from_records(rec, index="index")

    if columns and "a" in columns:
        assert_eq(aa, df["a"].values)
    if columns and "b" in columns:
        assert_eq(bb, df["b"].values)
    assert_eq(ii, df.index.values)


def test_numpy_non_contiguous():
    recdtype = np.dtype([("index", np.int64), ("a", np.int32)])
    rec = np.recarray(10, dtype=recdtype)
    rec.index = np.arange(30, 40)
    rec.a = aa = np.arange(20, dtype=np.int32)[::2]
    assert rec.a.flags["C_CONTIGUOUS"] is False

    gdf = DataFrame.from_records(rec, index="index")
    assert_eq(aa, gdf["a"].values)


@pytest.mark.parametrize(
    "data",
    [
        Series([1, 2, 3, -12, 12, 44]),
        Series([1, 2, 3, -12, 12, 44], dtype="str"),
        Series([1, 2, 3, -12, 12, 44]).index,
        DataFrame({"a": [1, 2, 3, -1234], "b": [0.1, 0.2222, 0.4, -3.14]}),
        DataFrame(
            {"a": [1, 2, 3, -1234], "b": [0.1, 0.2222, 0.4, -3.14]}
        ).index,
    ],
)
@pytest.mark.parametrize("dtype", [None, "float", "int", "str"])
def test_series_dataframe__array__(data, dtype):
    gs = data

    with pytest.raises(TypeError):
        gs.__array__(dtype=dtype)
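

# A minimal round-trip sketch, added for illustration and not part of the
# original suite: a record array produced by to_records(index=True) can be
# fed back through from_records(index="index") to rebuild an equivalent
# frame. The column name and values are illustrative choices.
def test_records_roundtrip_sketch():
    df = DataFrame()
    df["a"] = np.arange(5, dtype=np.int32)

    rec = df.to_records(index=True)
    back = DataFrame.from_records(rec, index="index")
    # Data and index both survive the round trip.
    assert_eq(rec["a"], back["a"].values)
    assert_eq(rec["index"], back.index.values)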
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_sorting.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_constructing.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_conversion.py
# Copyright (c) 2023, NVIDIA CORPORATION. import pandas as pd import cudf from cudf.testing._utils import assert_eq def test_convert_dtypes(): data = { "a": [1, 2, 3], "b": [1, 2, 3], "c": [1.1, 2.2, 3.3], "d": [1.0, 2.0, 3.0], "e": [1.0, 2.0, 3.0], "f": ["a", "b", "c"], "g": ["a", "b", "c"], "h": ["2001-01-01", "2001-01-02", "2001-01-03"], } dtypes = [ "int8", "int64", "float32", "float32", "float64", "str", "category", "datetime64[ns]", ] df = pd.DataFrame( { k: pd.Series(v, dtype=d) for k, v, d in zip(data.keys(), data.values(), dtypes) } ) gdf = cudf.DataFrame.from_pandas(df) expect = df.convert_dtypes() got = gdf.convert_dtypes().to_pandas(nullable=True) assert_eq(expect, got)
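

# A small companion sketch, added for illustration and not part of the
# original module: for a frame without nulls, pandas' convert_dtypes maps
# plain numpy dtypes to their nullable extension counterparts, and cudf
# mirrors that through to_pandas(nullable=True). The values are illustrative.
def test_convert_dtypes_no_nulls_sketch():
    pdf = pd.DataFrame({"x": [1, 2, 3], "y": [1.5, 2.5, 3.5]})
    gdf = cudf.DataFrame.from_pandas(pdf)

    # Expect x: int64 -> Int64 and y: float64 -> Float64 on both sides.
    expect = pdf.convert_dtypes()
    got = gdf.convert_dtypes().to_pandas(nullable=True)
    assert_eq(expect, got)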
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_reindexing.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_timeseries.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_computation.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_io_serialization.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_missing.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_indexing.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_attributes.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_function_application.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_binary_operations.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_selecting.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0
rapidsai_public_repos/cudf/python/cudf/cudf/tests
rapidsai_public_repos/cudf/python/cudf/cudf/tests/dataframe/test_reshaping.py
# Copyright (c) 2023, NVIDIA CORPORATION.
0