<SYSTEM_TASK:> Shuffle the order of the data in this axis based on the `func`. <END_TASK> <USER_TASK:> Description: def shuffle(self, func, num_splits=None, **kwargs): """Shuffle the order of the data in this axis based on the `func`. Extends `BaseFrameAxisPartition.shuffle`. :param func: The function to apply before splitting. :param num_splits: The number of partitions to split the result into. Defaults to the number of blocks in this partition. :param kwargs: Additional keyword arguments to pass to `func`. :return: A list of `PyarrowOnRayFramePartition` objects. """
if num_splits is None: num_splits = len(self.list_of_blocks) args = [self.axis, func, num_splits, kwargs] args.extend(self.list_of_blocks) return [ PyarrowOnRayFramePartition(obj) for obj in deploy_ray_axis_func._remote(args, num_return_vals=num_splits) ]
<SYSTEM_TASK:> Apply a function to the object stored in this partition. <END_TASK> <USER_TASK:> Description: def apply(self, func, **kwargs): """Apply a function to the object stored in this partition. Note: It does not matter if func is callable or an ObjectID. Ray will handle it correctly either way. The keyword arguments are sent as a dictionary. Args: func: The function to apply. Returns: A RayRemotePartition object. """
oid = self.oid self.call_queue.append((func, kwargs)) def call_queue_closure(oid_obj, call_queues): for func, kwargs in call_queues: if isinstance(func, ray.ObjectID): func = ray.get(func) if isinstance(kwargs, ray.ObjectID): kwargs = ray.get(kwargs) oid_obj = func(oid_obj, **kwargs) return oid_obj oid = deploy_ray_func.remote( call_queue_closure, oid, kwargs={"call_queues": self.call_queue} ) self.call_queue = [] return PyarrowOnRayFramePartition(oid)
<SYSTEM_TASK:> Convert the object stored in this partition to a Pandas DataFrame. <END_TASK> <USER_TASK:> Description: def to_pandas(self): """Convert the object stored in this partition to a Pandas DataFrame. Returns: A Pandas DataFrame. """
dataframe = self.get().to_pandas() assert type(dataframe) is pandas.DataFrame or type(dataframe) is pandas.Series return dataframe
<SYSTEM_TASK:> Put an object in the Plasma store and wrap it in this object. <END_TASK> <USER_TASK:> Description: def put(cls, obj): """Put an object in the Plasma store and wrap it in this object. Args: obj: The object to be put. Returns: A `RayRemotePartition` object. """
return PyarrowOnRayFramePartition(ray.put(pyarrow.Table.from_pandas(obj)))
<SYSTEM_TASK:> Database style join, where common columns in "on" are merged. <END_TASK> <USER_TASK:> Description: def merge( left, right, how="inner", on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=("_x", "_y"), copy=True, indicator=False, validate=None, ): """Database style join, where common columns in "on" are merged. Args: left: DataFrame. right: DataFrame. how: What type of join to use. on: The common column name(s) to join on. If None, and left_on and right_on are also None, will default to all commonly named columns. left_on: The column(s) on the left to use for the join. right_on: The column(s) on the right to use for the join. left_index: Use the index from the left as the join keys. right_index: Use the index from the right as the join keys. sort: Sort the join keys lexicographically in the result. suffixes: Add this suffix to the common names not in the "on". copy: Does nothing in our implementation indicator: Adds a column named _merge to the DataFrame with metadata from the merge about each row. validate: Checks if merge is a specific type. Returns: A merged Dataframe """
if not isinstance(left, DataFrame): raise ValueError( "can not merge DataFrame with instance of type {}".format(type(right)) ) return left.merge( right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, copy=copy, indicator=indicator, validate=validate, )
<SYSTEM_TASK:> Check if it is possible to distribute a query given these args <END_TASK> <USER_TASK:> Description: def is_distributed(partition_column, lower_bound, upper_bound): """ Check if it is possible to distribute a query given these args Args: partition_column: column used to share the data between the workers lower_bound: the minimum value to be requested from the partition_column upper_bound: the maximum value to be requested from the partition_column Returns: True if distributed, False if not """
if ( (partition_column is not None) and (lower_bound is not None) and (upper_bound is not None) ): if upper_bound > lower_bound: return True else: raise InvalidArguments("upper_bound must be greater than lower_bound.") elif (partition_column is None) and (lower_bound is None) and (upper_bound is None): return False else: raise InvalidArguments( "Invalid combination of partition_column, lower_bound, upper_bound." "All these arguments should be passed (distributed) or none of them (standard pandas)." )
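A quick usage sketch of the decision table above (the function and its `InvalidArguments` error are the ones defined here; the column name and bounds are made up):
is_distributed("id", 0, 1_000)     # -> True: all three args given, bounds ordered
is_distributed(None, None, None)   # -> False: fall back to standard pandas
is_distributed("id", 1_000, 0)     # raises InvalidArguments: bounds reversed
is_distributed("id", None, 100)    # raises InvalidArguments: partial arguments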
<SYSTEM_TASK:> Check whether the given sql arg is a query or a table <END_TASK> <USER_TASK:> Description: def is_table(engine, sql): """ Check whether the given sql arg is a query or a table Args: engine: SQLAlchemy connection engine sql: SQL query or table name Returns: True if it is a table, False if not """
if engine.dialect.has_table(engine, sql): return True return False
<SYSTEM_TASK:> Extract all useful info from the given table <END_TASK> <USER_TASK:> Description: def get_table_metadata(engine, table): """ Extract all useful info from the given table Args: engine: SQLAlchemy connection engine table: table name Returns: Table metadata object """
metadata = MetaData() metadata.reflect(bind=engine, only=[table]) table_metadata = Table(table, metadata, autoload=True) return table_metadata
<SYSTEM_TASK:> Extract column names and python types from metadata <END_TASK> <USER_TASK:> Description: def get_table_columns(metadata): """ Extract column names and python types from metadata Args: metadata: Table metadata Returns: dict with column names and python types """
cols = OrderedDict() for col in metadata.c: name = str(col).rpartition(".")[2] cols[name] = col.type.python_type.__name__ return cols
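A small self-contained check of the two reflection helpers above against an in-memory SQLite database. This is a sketch assuming SQLAlchemy 1.x (matching the `autoload=True` style used here); the table and column names are invented for illustration:
from sqlalchemy import create_engine

engine = create_engine("sqlite://")
with engine.connect() as con:
    con.execute("CREATE TABLE users (id INTEGER, name TEXT)")

metadata = get_table_metadata(engine, "users")
print(get_table_columns(metadata))
# Expected: OrderedDict([('id', 'int'), ('name', 'str')]) -- str(col) is "users.id",
# so rpartition(".") keeps the bare column name, and python_type maps to int/str.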
<SYSTEM_TASK:> Extract column names and python types from query <END_TASK> <USER_TASK:> Description: def get_query_columns(engine, query): """ Extract column names and python types from query Args: engine: SQLAlchemy connection engine query: SQL query Returns: dict with column names and python types """
con = engine.connect() result = con.execute(query).fetchone() values = list(result) cols_names = result.keys() cols = OrderedDict() for i in range(len(cols_names)): cols[cols_names[i]] = type(values[i]).__name__ return cols
<SYSTEM_TASK:> Check partition_column existence and type <END_TASK> <USER_TASK:> Description: def check_partition_column(partition_column, cols): """ Check partition_column existence and type Args: partition_column: partition_column name cols: dict with columns names and python types Returns: None """
for k, v in cols.items(): if k == partition_column: if v == "int": return else: raise InvalidPartitionColumn( "partition_column must be int, and not {0}".format(v) ) raise InvalidPartitionColumn( "partition_column {0} not found in the query".format(partition_column) )
<SYSTEM_TASK:> Return a list of column names and the query string <END_TASK> <USER_TASK:> Description: def get_query_info(sql, con, partition_column): """ Return a list of column names and the query string Args: sql: SQL query or table name con: database connection or url string partition_column: column used to share the data between the workers Returns: List of column names and the query string """
engine = create_engine(con) if is_table(engine, sql): table_metadata = get_table_metadata(engine, sql) query = build_query_from_table(sql) cols = get_table_columns(table_metadata) else: check_query(sql) query = sql.replace(";", "") cols = get_query_columns(engine, query) # TODO allow validation that takes into account edge cases of pandas e.g. "[index]" # check_partition_column(partition_column, cols) cols_names = list(cols.keys()) return cols_names, query
<SYSTEM_TASK:> Put partition bounds in the query <END_TASK> <USER_TASK:> Description: def query_put_bounders(query, partition_column, start, end): """ Put partition bounds in the query Args: query: SQL query string partition_column: partition_column name start: lower_bound end: upper_bound Returns: Query with bounds applied """
where = " WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}".format( partition_column, start, end ) query_with_bounders = "SELECT * FROM ({0}) AS TMP_TABLE {1}".format(query, where) return query_with_bounders
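The rewrite is purely string-based; a quick illustration (the query and column names are made up):
q = query_put_bounders("SELECT id, amount FROM sales", "id", 0, 999)
print(q)
# SELECT * FROM (SELECT id, amount FROM sales) AS TMP_TABLE  WHERE TMP_TABLE.id >= 0 AND TMP_TABLE.id <= 999
# (the doubled space before WHERE comes from the format string plus the leading space in `where`)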
<SYSTEM_TASK:> Computes the index after a number of rows have been removed. <END_TASK> <USER_TASK:> Description: def compute_index(self, axis, data_object, compute_diff=True): """Computes the index after a number of rows have been removed. Note: In order for this to be used properly, the indexes must not be changed before you compute this. Args: axis: The axis to extract the index from. data_object: The new data object to extract the index from. compute_diff: True to use `self` to compute the index from self rather than data_object. This is used when the dimension of the index may have changed, but the deleted rows/columns are unknown. Returns: A new pandas.Index object. """
def pandas_index_extraction(df, axis): if not axis: return df.index else: try: return df.columns except AttributeError: return pandas.Index([]) index_obj = self.index if not axis else self.columns old_blocks = self.data if compute_diff else None new_indices = data_object.get_indices( axis=axis, index_func=lambda df: pandas_index_extraction(df, axis), old_blocks=old_blocks, ) return index_obj[new_indices] if compute_diff else new_indices
<SYSTEM_TASK:> Returns the numeric columns of the Manager. <END_TASK> <USER_TASK:> Description: def numeric_columns(self, include_bool=True): """Returns the numeric columns of the Manager. Returns: List of index names. """
columns = [] for col, dtype in zip(self.columns, self.dtypes): if is_numeric_dtype(dtype) and ( include_bool or (not include_bool and dtype != np.bool_) ): columns.append(col) return columns
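A pandas-only sketch of the same dtype filter, showing which columns survive with and without include_bool (example data is made up):
import numpy as np
import pandas
from pandas.api.types import is_numeric_dtype

df = pandas.DataFrame({"a": [1, 2], "b": [1.5, 2.5], "c": ["x", "y"], "d": [True, False]})
with_bool = [c for c, t in df.dtypes.items() if is_numeric_dtype(t)]
without_bool = [c for c, t in df.dtypes.items() if is_numeric_dtype(t) and t != np.bool_]
print(with_bool)     # ['a', 'b', 'd']  -> include_bool=True
print(without_bool)  # ['a', 'b']       -> include_bool=False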
<SYSTEM_TASK:> Preprocesses numeric functions to clean dataframe and pick numeric indices. <END_TASK> <USER_TASK:> Description: def numeric_function_clean_dataframe(self, axis): """Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager. """
result = None query_compiler = self # If no numeric columns and over columns, then return empty Series if not axis and len(self.index) == 0: result = pandas.Series(dtype=np.int64) nonnumeric = [ col for col, dtype in zip(self.columns, self.dtypes) if not is_numeric_dtype(dtype) ] if len(nonnumeric) == len(self.columns): # If over rows and no numeric columns, return this if axis: result = pandas.Series([np.nan for _ in self.index]) else: result = pandas.Series([0 for _ in self.index]) else: query_compiler = self.drop(columns=nonnumeric) return result, query_compiler
<SYSTEM_TASK:> Joins this object with a list of other objects, or with a single other object. <END_TASK> <USER_TASK:> Description: def join(self, other, **kwargs): """Joins this object with a list of other objects, or with a single other object. Args: other: The other object(s) to join on. Returns: Joined objects. """
if not isinstance(other, list): other = [other] return self._join_list_of_managers(other, **kwargs)
<SYSTEM_TASK:> Concatenates two objects together. <END_TASK> <USER_TASK:> Description: def concat(self, axis, other, **kwargs): """Concatenates two objects together. Args: axis: The axis index object to join (0 for columns, 1 for index). other: The other_index to concat with. Returns: Concatenated objects. """
return self._append_list_of_managers(other, axis, **kwargs)
<SYSTEM_TASK:> Copartition two QueryCompiler objects. <END_TASK> <USER_TASK:> Description: def copartition(self, axis, other, how_to_join, sort, force_repartition=False): """Copartition two QueryCompiler objects. Args: axis: The axis to copartition along. other: The other Query Compiler(s) to copartition against. how_to_join: How to manage joining the index object ("left", "right", etc.) sort: Whether or not to sort the joined index. force_repartition: Whether or not to force the repartitioning. By default, this method will skip repartitioning if it is possible. This is because reindexing is extremely inefficient. Because this method is used to `join` or `append`, it is vital that the internal indices match. Returns: A tuple (left query compiler, right query compiler list, joined index). """
if isinstance(other, type(self)): other = [other] index_obj = ( [o.index for o in other] if axis == 0 else [o.columns for o in other] ) joined_index = self._join_index_objects( axis ^ 1, index_obj, how_to_join, sort=sort ) # We have to set these because otherwise when we perform the functions it may # end up serializing this entire object. left_old_idx = self.index if axis == 0 else self.columns right_old_idxes = index_obj # Start with this and we'll repartition the first time, and then not again. reindexed_self = self.data reindexed_other_list = [] def compute_reindex(old_idx): """Create a function based on the old index and axis. Args: old_idx: The old index/columns Returns: A function that will be run in each partition. """ def reindex_partition(df): if axis == 0: df.index = old_idx new_df = df.reindex(index=joined_index) new_df.index = pandas.RangeIndex(len(new_df.index)) else: df.columns = old_idx new_df = df.reindex(columns=joined_index) new_df.columns = pandas.RangeIndex(len(new_df.columns)) return new_df return reindex_partition for i in range(len(other)): # If the indices are equal we can skip partitioning so long as we are not # forced to repartition. See note above about `force_repartition`. if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition): reindex_left = None else: reindex_left = self._prepare_method(compute_reindex(left_old_idx)) if right_old_idxes[i].equals(joined_index) and not force_repartition: reindex_right = None else: reindex_right = other[i]._prepare_method( compute_reindex(right_old_idxes[i]) ) reindexed_self, reindexed_other = reindexed_self.copartition_datasets( axis, other[i].data, reindex_left, reindex_right ) reindexed_other_list.append(reindexed_other) return reindexed_self, reindexed_other_list, joined_index
<SYSTEM_TASK:> Helper method for inter-manager and scalar operations. <END_TASK> <USER_TASK:> Description: def _inter_df_op_handler(self, func, other, **kwargs): """Helper method for inter-manager and scalar operations. Args: func: The function to use on the Manager/scalar. other: The other Manager/scalar. Returns: New DataManager with new data and index. """
axis = kwargs.get("axis", 0) axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0 if isinstance(other, type(self)): return self._inter_manager_operations( other, "outer", lambda x, y: func(x, y, **kwargs) ) else: return self._scalar_operations( axis, other, lambda df: func(df, other, **kwargs) )
<SYSTEM_TASK:> Perform an operation between two objects. <END_TASK> <USER_TASK:> Description: def binary_op(self, op, other, **kwargs): """Perform an operation between two objects. Note: The list of operations is as follows: - add - eq - floordiv - ge - gt - le - lt - mod - mul - ne - pow - rfloordiv - rmod - rpow - rsub - rtruediv - sub - truediv - __and__ - __or__ - __xor__ Args: op: The operation. See list of operations above other: The object to operate against. Returns: A new QueryCompiler object. """
func = getattr(pandas.DataFrame, op) return self._inter_df_op_handler(func, other, **kwargs)
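The dispatch relies on looking the op name up on pandas.DataFrame; a pandas-only sketch of that pattern on toy data:
import pandas

df1 = pandas.DataFrame({"a": [1, 2, 3]})
df2 = pandas.DataFrame({"a": [10, 20, 30]})
func = getattr(pandas.DataFrame, "add")  # same lookup as binary_op("add", other)
print(func(df1, df2))                    # equivalent to df1.add(df2)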
<SYSTEM_TASK:> Uses other manager to update corresponding values in this manager. <END_TASK> <USER_TASK:> Description: def update(self, other, **kwargs): """Uses other manager to update corresponding values in this manager. Args: other: The other manager. Returns: New DataManager with updated data and index. """
assert isinstance( other, type(self) ), "Must have the same DataManager subclass to perform this operation" def update_builder(df, other, **kwargs): # This is because of a requirement in Arrow df = df.copy() df.update(other, **kwargs) return df return self._inter_df_op_handler(update_builder, other, **kwargs)
<SYSTEM_TASK:> Gets values from this manager where cond is true else from other. <END_TASK> <USER_TASK:> Description: def where(self, cond, other, **kwargs): """Gets values from this manager where cond is true else from other. Args: cond: Condition on which to evaluate values. Returns: New DataManager with updated data and index. """
assert isinstance( cond, type(self) ), "Must have the same DataManager subclass to perform this operation" if isinstance(other, type(self)): # Note: Currently we are doing this with two maps across the entire # data. This can be done with a single map, but it will take a # modification in the `BlockPartition` class. # If this were in one pass it would be ~2x faster. # TODO (devin-petersohn) rewrite this to take one pass. def where_builder_first_pass(cond, other, **kwargs): return cond.where(cond, other, **kwargs) def where_builder_second_pass(df, new_other, **kwargs): return df.where(new_other.eq(True), new_other, **kwargs) first_pass = cond._inter_manager_operations( other, "left", where_builder_first_pass ) final_pass = self._inter_manager_operations( first_pass, "left", where_builder_second_pass ) return self.__constructor__(final_pass.data, self.index, self.columns) else: axis = kwargs.get("axis", 0) # Rather than serializing and passing in the index/columns, we will # just change this index to match the internal index. if isinstance(other, pandas.Series): other.index = pandas.RangeIndex(len(other.index)) def where_builder_series(df, cond): if axis == 0: df.index = pandas.RangeIndex(len(df.index)) cond.index = pandas.RangeIndex(len(cond.index)) else: df.columns = pandas.RangeIndex(len(df.columns)) cond.columns = pandas.RangeIndex(len(cond.columns)) return df.where(cond, other, **kwargs) reindexed_self, reindexed_cond, a = self.copartition( axis, cond, "left", False ) # Unwrap from list given by `copartition` reindexed_cond = reindexed_cond[0] new_data = reindexed_self.inter_data_operation( axis, lambda l, r: where_builder_series(l, r), reindexed_cond ) return self.__constructor__(new_data, self.index, self.columns)
<SYSTEM_TASK:> Handler for mapping scalar operations across a Manager. <END_TASK> <USER_TASK:> Description: def _scalar_operations(self, axis, scalar, func): """Handler for mapping scalar operations across a Manager. Args: axis: The axis index object to execute the function on. scalar: The scalar value to map. func: The function to use on the Manager with the scalar. Returns: A new QueryCompiler with updated data and new index. """
if isinstance(scalar, (list, np.ndarray, pandas.Series)): new_index = self.index if axis == 0 else self.columns def list_like_op(df): if axis == 0: df.index = new_index else: df.columns = new_index return func(df) new_data = self._map_across_full_axis( axis, self._prepare_method(list_like_op) ) return self.__constructor__(new_data, self.index, self.columns) else: return self._map_partitions(self._prepare_method(func))
<SYSTEM_TASK:> Fits a new index for this Manager. <END_TASK> <USER_TASK:> Description: def reindex(self, axis, labels, **kwargs): """Fits a new index for this Manager. Args: axis: The axis index object to target the reindex on. labels: New labels to conform 'axis' on to. Returns: A new QueryCompiler with updated data and new index. """
# To reindex, we need a function that will be shipped to each of the # partitions. def reindex_builder(df, axis, old_labels, new_labels, **kwargs): if axis: while len(df.columns) < len(old_labels): df[len(df.columns)] = np.nan df.columns = old_labels new_df = df.reindex(columns=new_labels, **kwargs) # reset the internal columns back to a RangeIndex new_df.columns = pandas.RangeIndex(len(new_df.columns)) return new_df else: while len(df.index) < len(old_labels): df.loc[len(df.index)] = np.nan df.index = old_labels new_df = df.reindex(index=new_labels, **kwargs) # reset the internal index back to a RangeIndex new_df.reset_index(inplace=True, drop=True) return new_df old_labels = self.columns if axis else self.index new_index = self.index if axis else labels new_columns = labels if axis else self.columns func = self._prepare_method( lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs) ) # The reindex can just be mapped over the axis we are modifying. This # is for simplicity in implementation. We specify num_splits here # because if we are repartitioning we should (in the future). # Additionally this operation is often followed by an operation that # assumes identical partitioning. Internally, we *may* change the # partitioning during a map across a full axis. new_data = self._map_across_full_axis(axis, func) return self.__constructor__(new_data, new_index, new_columns)
<SYSTEM_TASK:> Removes all levels from index and sets a default level_0 index. <END_TASK> <USER_TASK:> Description: def reset_index(self, **kwargs): """Removes all levels from index and sets a default level_0 index. Returns: A new QueryCompiler with updated data and reset index. """
drop = kwargs.get("drop", False) new_index = pandas.RangeIndex(len(self.index)) if not drop: if isinstance(self.index, pandas.MultiIndex): # TODO (devin-petersohn) ensure partitioning is properly aligned new_column_names = pandas.Index(self.index.names) new_columns = new_column_names.append(self.columns) index_data = pandas.DataFrame(list(zip(*self.index))).T result = self.data.from_pandas(index_data).concat(1, self.data) return self.__constructor__(result, new_index, new_columns) else: new_column_name = ( self.index.name if self.index.name is not None else "index" if "index" not in self.columns else "level_0" ) new_columns = self.columns.insert(0, new_column_name) result = self.insert(0, new_column_name, self.index) return self.__constructor__(result.data, new_index, new_columns) else: # The copies here are to ensure that we do not give references to # this object for the purposes of updates. return self.__constructor__( self.data.copy(), new_index, self.columns.copy(), self._dtype_cache )
<SYSTEM_TASK:> Apply function that will reduce the data to a Pandas Series. <END_TASK> <USER_TASK:> Description: def _full_reduce(self, axis, map_func, reduce_func=None): """Apply function that will reduce the data to a Pandas Series. Args: axis: 0 for columns and 1 for rows. Default is 0. map_func: Callable function to map the dataframe. reduce_func: Callable function to reduce the dataframe. If none, then apply map_func twice. Return: A new QueryCompiler object containing the results from map_func and reduce_func. """
if reduce_func is None: reduce_func = map_func mapped_parts = self.data.map_across_blocks(map_func) full_frame = mapped_parts.map_across_full_axis(axis, reduce_func) if axis == 0: columns = self.columns return self.__constructor__( full_frame, index=["__reduced__"], columns=columns ) else: index = self.index return self.__constructor__( full_frame, index=index, columns=["__reduced__"] )
<SYSTEM_TASK:> Counts the number of non-NaN objects for each column or row. <END_TASK> <USER_TASK:> Description: def count(self, **kwargs): """Counts the number of non-NaN objects for each column or row. Return: A new QueryCompiler object containing counts of non-NaN objects from each column or row. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().count(**kwargs) axis = kwargs.get("axis", 0) map_func = self._build_mapreduce_func(pandas.DataFrame.count, **kwargs) reduce_func = self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs) return self._full_reduce(axis, map_func, reduce_func)
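A pandas-only sketch of the map/reduce pair used here: count non-NA values per block, then sum the per-block counts (row partitions are simulated by slicing):
import numpy as np
import pandas

df = pandas.DataFrame({"a": [1, np.nan, 3, 4], "b": [np.nan, np.nan, 1, 2]})
blocks = [df.iloc[:2], df.iloc[2:]]                  # stand-ins for row partitions
mapped = [block.count() for block in blocks]         # map: per-block counts
reduced = pandas.concat(mapped, axis=1).sum(axis=1)  # reduce: sum across blocks
assert reduced.equals(df.count())                    # matches a single global count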
<SYSTEM_TASK:> Returns the mean for each numerical column or row. <END_TASK> <USER_TASK:> Description: def mean(self, **kwargs): """Returns the mean for each numerical column or row. Return: A new QueryCompiler object containing the mean from each numerical column or row. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().mean(**kwargs) # Pandas default is 0 (though not mentioned in docs) axis = kwargs.get("axis", 0) sums = self.sum(**kwargs) counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None)) if sums._is_transposed and counts._is_transposed: sums = sums.transpose() counts = counts.transpose() result = sums.binary_op("truediv", counts, axis=axis) return result.transpose() if axis == 0 else result
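The mean is assembled from the sum and count reductions; a pandas-only check of that identity on toy data:
import numpy as np
import pandas

df = pandas.DataFrame({"a": [1.0, np.nan, 3.0], "b": [2.0, 4.0, 6.0]})
assert (df.sum() / df.count()).equals(df.mean())  # NaNs are skipped by both paths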
<SYSTEM_TASK:> Returns the minimum from each column or row. <END_TASK> <USER_TASK:> Description: def min(self, **kwargs): """Returns the minimum from each column or row. Return: A new QueryCompiler object with the minimum value from each column or row. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().min(**kwargs) mapreduce_func = self._build_mapreduce_func(pandas.DataFrame.min, **kwargs) return self._full_reduce(kwargs.get("axis", 0), mapreduce_func)
<SYSTEM_TASK:> Calculates the sum or product of the DataFrame. <END_TASK> <USER_TASK:> Description: def _process_sum_prod(self, func, **kwargs): """Calculates the sum or product of the DataFrame. Args: func: Pandas func to apply to DataFrame. ignore_axis: Whether to ignore axis when raising TypeError Return: A new QueryCompiler object with sum or prod of the object. """
axis = kwargs.get("axis", 0) min_count = kwargs.get("min_count", 0) def sum_prod_builder(df, **kwargs): return func(df, **kwargs) if min_count <= 1: return self._full_reduce(axis, sum_prod_builder) else: return self._full_axis_reduce(axis, sum_prod_builder)
<SYSTEM_TASK:> Returns the product of each numerical column or row. <END_TASK> <USER_TASK:> Description: def prod(self, **kwargs): """Returns the product of each numerical column or row. Return: A new QueryCompiler object with the product of each numerical column or row. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().prod(**kwargs) return self._process_sum_prod( self._build_mapreduce_func(pandas.DataFrame.prod, **kwargs), **kwargs )
<SYSTEM_TASK:> Calculates if any or all the values are true. <END_TASK> <USER_TASK:> Description: def _process_all_any(self, func, **kwargs): """Calculates if any or all the values are true. Return: A new QueryCompiler object containing boolean values or boolean. """
axis = kwargs.get("axis", 0) axis = 0 if axis is None else axis kwargs["axis"] = axis builder_func = self._build_mapreduce_func(func, **kwargs) return self._full_reduce(axis, builder_func)
<SYSTEM_TASK:> Returns whether all the elements are true, potentially over an axis. <END_TASK> <USER_TASK:> Description: def all(self, **kwargs): """Returns whether all the elements are true, potentially over an axis. Return: A new QueryCompiler object containing boolean values or boolean. """
if self._is_transposed: # Pandas ignores bool_only on axis=1 kwargs["bool_only"] = False kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().all(**kwargs) return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
<SYSTEM_TASK:> Converts columns dtypes to given dtypes. <END_TASK> <USER_TASK:> Description: def astype(self, col_dtypes, **kwargs): """Converts columns dtypes to given dtypes. Args: col_dtypes: Dictionary of {col: dtype,...} where col is the column name and dtype is a numpy dtype. Returns: DataFrame with updated dtypes. """
# Group indices to update by dtype for less map operations dtype_indices = {} columns = col_dtypes.keys() numeric_indices = list(self.columns.get_indexer_for(columns)) # Create Series for the updated dtypes new_dtypes = self.dtypes.copy() for i, column in enumerate(columns): dtype = col_dtypes[column] if ( not isinstance(dtype, type(self.dtypes[column])) or dtype != self.dtypes[column] ): # Only add dtype only if different if dtype in dtype_indices.keys(): dtype_indices[dtype].append(numeric_indices[i]) else: dtype_indices[dtype] = [numeric_indices[i]] # Update the new dtype series to the proper pandas dtype try: new_dtype = np.dtype(dtype) except TypeError: new_dtype = dtype if dtype != np.int32 and new_dtype == np.int32: new_dtype = np.dtype("int64") elif dtype != np.float32 and new_dtype == np.float32: new_dtype = np.dtype("float64") new_dtypes[column] = new_dtype # Update partitions for each dtype that is updated new_data = self.data for dtype in dtype_indices.keys(): def astype(df, internal_indices=[]): block_dtypes = {} for ind in internal_indices: block_dtypes[df.columns[ind]] = dtype return df.astype(block_dtypes) new_data = new_data.apply_func_to_select_indices( 0, astype, dtype_indices[dtype], keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
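A pandas-only sketch of the per-dtype grouping above: columns that share a target dtype are converted together in a single pass (column names and dtypes are made up):
import pandas

df = pandas.DataFrame({"a": ["1", "2"], "b": ["3", "4"], "c": [1.0, 2.0]})
col_dtypes = {"a": "int64", "b": "int64", "c": "float32"}

by_dtype = {}
for col, dtype in col_dtypes.items():
    by_dtype.setdefault(dtype, []).append(col)   # group columns by target dtype

for dtype, cols in by_dtype.items():
    df = df.astype({c: dtype for c in cols})     # one astype call per target dtype
print(df.dtypes)                                 # a, b -> int64 ; c -> float32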
<SYSTEM_TASK:> Applies a map that reduces the Manager to a series but requires knowledge of the full axis. <END_TASK> <USER_TASK:> Description: def _full_axis_reduce(self, axis, func, alternate_index=None): """Applies a map that reduces the Manager to a series but requires knowledge of the full axis. Args: func: Function to reduce the Manager by. This function takes in a Manager. axis: axis to apply the function to. alternate_index: If the resulting series should have an index different from the current query_compiler's index or columns. Return: Pandas series containing the reduced data. """
result = self.data.map_across_full_axis(axis, func) if axis == 0: columns = alternate_index if alternate_index is not None else self.columns return self.__constructor__(result, index=["__reduced__"], columns=columns) else: index = alternate_index if alternate_index is not None else self.index return self.__constructor__(result, index=index, columns=["__reduced__"])
<SYSTEM_TASK:> Returns the first occurrence of the maximum over requested axis. <END_TASK> <USER_TASK:> Description: def idxmax(self, **kwargs): """Returns the first occurrence of the maximum over requested axis. Returns: A new QueryCompiler object containing the maximum of each column or axis. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().idxmax(**kwargs) axis = kwargs.get("axis", 0) index = self.index if axis == 0 else self.columns def idxmax_builder(df, **kwargs): if axis == 0: df.index = index else: df.columns = index return df.idxmax(**kwargs) func = self._build_mapreduce_func(idxmax_builder, **kwargs) return self._full_axis_reduce(axis, func)
<SYSTEM_TASK:> Returns the first occurrence of the minimum over requested axis. <END_TASK> <USER_TASK:> Description: def idxmin(self, **kwargs): """Returns the first occurrence of the minimum over requested axis. Returns: A new QueryCompiler object containing the minimum of each column or axis. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().idxmin(**kwargs) axis = kwargs.get("axis", 0) index = self.index if axis == 0 else self.columns def idxmin_builder(df, **kwargs): if axis == 0: df.index = index else: df.columns = index return df.idxmin(**kwargs) func = self._build_mapreduce_func(idxmin_builder, **kwargs) return self._full_axis_reduce(axis, func)
<SYSTEM_TASK:> Returns median of each column or row. <END_TASK> <USER_TASK:> Description: def median(self, **kwargs): """Returns median of each column or row. Returns: A new QueryCompiler object containing the median of each column or row. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().median(**kwargs) # Pandas default is 0 (though not mentioned in docs) axis = kwargs.get("axis", 0) func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs) return self._full_axis_reduce(axis, func)
<SYSTEM_TASK:> Returns the memory usage of each column. <END_TASK> <USER_TASK:> Description: def memory_usage(self, **kwargs): """Returns the memory usage of each column. Returns: A new QueryCompiler object containing the memory usage of each column. """
def memory_usage_builder(df, **kwargs): return df.memory_usage(**kwargs) func = self._build_mapreduce_func(memory_usage_builder, **kwargs) return self._full_axis_reduce(0, func)
<SYSTEM_TASK:> Returns quantile of each column or row. <END_TASK> <USER_TASK:> Description: def quantile_for_single_value(self, **kwargs): """Returns quantile of each column or row. Returns: A new QueryCompiler object containing the quantile of each column or row. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().quantile_for_single_value(**kwargs) axis = kwargs.get("axis", 0) q = kwargs.get("q", 0.5) assert type(q) is float def quantile_builder(df, **kwargs): try: return pandas.DataFrame.quantile(df, **kwargs) except ValueError: return pandas.Series() func = self._build_mapreduce_func(quantile_builder, **kwargs) result = self._full_axis_reduce(axis, func) if axis == 0: result.index = [q] else: result.columns = [q] return result
<SYSTEM_TASK:> Reduce the Manager along select indices using a function that needs the full axis. <END_TASK> <USER_TASK:> Description: def _full_axis_reduce_along_select_indices(self, func, axis, index): """Reduce the Manager along select indices using a function that needs the full axis. Args: func: Callable that reduces the dimension of the object and requires full knowledge of the entire axis. axis: 0 for columns and 1 for rows. Defaults to 0. index: Index of the resulting QueryCompiler. Returns: A new QueryCompiler object with index or BaseFrameManager object. """
# Convert indices to numeric indices old_index = self.index if axis else self.columns numeric_indices = [i for i, name in enumerate(old_index) if name in index] result = self.data.apply_func_to_select_indices_along_full_axis( axis, func, numeric_indices ) return result
<SYSTEM_TASK:> Generates descriptive statistics. <END_TASK> <USER_TASK:> Description: def describe(self, **kwargs): """Generates descriptive statistics. Returns: DataFrame object containing the descriptive statistics of the DataFrame. """
# Use pandas to calculate the correct columns new_columns = ( pandas.DataFrame(columns=self.columns) .astype(self.dtypes) .describe(**kwargs) .columns ) def describe_builder(df, internal_indices=[], **kwargs): return df.iloc[:, internal_indices].describe(**kwargs) # Apply describe and update indices, columns, and dtypes func = self._prepare_method(describe_builder, **kwargs) new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns) new_index = self.compute_index(0, new_data, False) return self.__constructor__(new_data, new_index, new_columns)
<SYSTEM_TASK:> Returns a new QueryCompiler with expr evaluated on columns. <END_TASK> <USER_TASK:> Description: def eval(self, expr, **kwargs): """Returns a new QueryCompiler with expr evaluated on columns. Args: expr: The string expression to evaluate. Returns: A new QueryCompiler with new columns after applying expr. """
columns = self.index if self._is_transposed else self.columns index = self.columns if self._is_transposed else self.index # Make a copy of columns and eval on the copy to determine if result type is # series or not columns_copy = pandas.DataFrame(columns=self.columns) columns_copy = columns_copy.eval(expr, inplace=False, **kwargs) expect_series = isinstance(columns_copy, pandas.Series) def eval_builder(df, **kwargs): # pop the `axis` parameter because it was needed to build the mapreduce # function but it is not a parameter used by `eval`. kwargs.pop("axis", None) df.columns = columns result = df.eval(expr, inplace=False, **kwargs) return result func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs) new_data = self._map_across_full_axis(1, func) if expect_series: new_columns = [columns_copy.name] new_index = index else: new_columns = columns_copy.columns new_index = self.index return self.__constructor__(new_data, new_index, new_columns)
<SYSTEM_TASK:> Returns a new QueryCompiler with modes calculated for each label along given axis. <END_TASK> <USER_TASK:> Description: def mode(self, **kwargs): """Returns a new QueryCompiler with modes calculated for each label along given axis. Returns: A new QueryCompiler with modes calculated. """
axis = kwargs.get("axis", 0) def mode_builder(df, **kwargs): result = df.mode(**kwargs) # We return a dataframe with the same shape as the input to ensure # that all the partitions will be the same shape if not axis and len(df) != len(result): # Pad columns append_values = pandas.DataFrame( columns=result.columns, index=range(len(result), len(df)) ) result = pandas.concat([result, append_values], ignore_index=True) elif axis and len(df.columns) != len(result.columns): # Pad rows append_vals = pandas.DataFrame( columns=range(len(result.columns), len(df.columns)), index=result.index, ) result = pandas.concat([result, append_vals], axis=1) return pandas.DataFrame(result) func = self._prepare_method(mode_builder, **kwargs) new_data = self._map_across_full_axis(axis, func) new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns)) new_dtypes = self._dtype_cache if new_dtypes is not None: new_dtypes.index = new_columns return self.__constructor__( new_data, new_index, new_columns, new_dtypes ).dropna(axis=axis, how="all")
<SYSTEM_TASK:> Replaces NaN values with the method provided. <END_TASK> <USER_TASK:> Description: def fillna(self, **kwargs): """Replaces NaN values with the method provided. Returns: A new QueryCompiler with null values filled. """
axis = kwargs.get("axis", 0) value = kwargs.get("value") if isinstance(value, dict): value = kwargs.pop("value") if axis == 0: index = self.columns else: index = self.index value = { idx: value[key] for key in value for idx in index.get_indexer_for([key]) } def fillna_dict_builder(df, func_dict={}): # We do this to ensure that no matter the state of the columns we get # the correct ones. func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict} return df.fillna(value=func_dict, **kwargs) new_data = self.data.apply_func_to_select_indices( axis, fillna_dict_builder, value, keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns) else: func = self._prepare_method(pandas.DataFrame.fillna, **kwargs) new_data = self._map_across_full_axis(axis, func) return self.__constructor__(new_data, self.index, self.columns)
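For the dict-valued case, label keys are translated to positional indices and then back to each block's local column labels; a pandas-only sketch of that round trip on a toy frame standing in for one partition:
import numpy as np
import pandas

df = pandas.DataFrame({"a": [np.nan, 1.0], "b": [np.nan, np.nan]})
value = {"a": 0.0, "b": -1.0}

positional = {df.columns.get_indexer_for([k])[0]: v for k, v in value.items()}
func_dict = {df.columns[idx]: positional[idx] for idx in positional}
print(df.fillna(value=func_dict))
#      a    b
# 0  0.0 -1.0
# 1  1.0 -1.0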
<SYSTEM_TASK:> Query columns of the DataManager with a boolean expression. <END_TASK> <USER_TASK:> Description: def query(self, expr, **kwargs): """Query columns of the DataManager with a boolean expression. Args: expr: Boolean expression to query the columns with. Returns: DataManager containing the rows where the boolean expression is satisfied. """
columns = self.columns def query_builder(df, **kwargs): # This is required because of an Arrow limitation # TODO revisit for Arrow error df = df.copy() df.index = pandas.RangeIndex(len(df)) df.columns = columns df.query(expr, inplace=True, **kwargs) df.columns = pandas.RangeIndex(len(df.columns)) return df func = self._prepare_method(query_builder, **kwargs) new_data = self._map_across_full_axis(1, func) # Query removes rows, so we need to update the index new_index = self.compute_index(0, new_data, True) return self.__constructor__(new_data, new_index, self.columns, self.dtypes)
<SYSTEM_TASK:> Computes numerical rank along axis. Equal values are set to the average. <END_TASK> <USER_TASK:> Description: def rank(self, **kwargs): """Computes numerical rank along axis. Equal values are set to the average. Returns: DataManager containing the ranks of the values along an axis. """
axis = kwargs.get("axis", 0) numeric_only = True if axis else kwargs.get("numeric_only", False) func = self._prepare_method(pandas.DataFrame.rank, **kwargs) new_data = self._map_across_full_axis(axis, func) # Since we assume no knowledge of internal state, we get the columns # from the internal partitions. if numeric_only: new_columns = self.compute_index(1, new_data, True) else: new_columns = self.columns new_dtypes = pandas.Series([np.float64 for _ in new_columns], index=new_columns) return self.__constructor__(new_data, self.index, new_columns, new_dtypes)
<SYSTEM_TASK:> Sorts the data with respect to either the columns or the indices. <END_TASK> <USER_TASK:> Description: def sort_index(self, **kwargs): """Sorts the data with respect to either the columns or the indices. Returns: DataManager containing the data sorted by columns or indices. """
axis = kwargs.pop("axis", 0) index = self.columns if axis else self.index # sort_index can have ascending be None and behaves as if it is False. # sort_values cannot have ascending be None. Thus, the following logic is to # convert the ascending argument to one that works with sort_values ascending = kwargs.pop("ascending", True) if ascending is None: ascending = False kwargs["ascending"] = ascending def sort_index_builder(df, **kwargs): if axis: df.columns = index else: df.index = index return df.sort_index(axis=axis, **kwargs) func = self._prepare_method(sort_index_builder, **kwargs) new_data = self._map_across_full_axis(axis, func) if axis: new_columns = pandas.Series(self.columns).sort_values(**kwargs) new_index = self.index else: new_index = pandas.Series(self.index).sort_values(**kwargs) new_columns = self.columns return self.__constructor__( new_data, new_index, new_columns, self.dtypes.copy() )
<SYSTEM_TASK:> Maps function to select indices along full axis. <END_TASK> <USER_TASK:> Description: def _map_across_full_axis_select_indices( self, axis, func, indices, keep_remaining=False ): """Maps function to select indices along full axis. Args: axis: 0 for columns and 1 for rows. func: Callable mapping function over the BlockPartitions. indices: indices along axis to map over. keep_remaining: True to keep the indices where the function was not applied. Returns: BaseFrameManager containing the result of mapping func over axis on indices. """
return self.data.apply_func_to_select_indices_along_full_axis( axis, func, indices, keep_remaining )
<SYSTEM_TASK:> Returns Manager containing quantiles along an axis for numeric columns. <END_TASK> <USER_TASK:> Description: def quantile_for_list_of_values(self, **kwargs): """Returns Manager containing quantiles along an axis for numeric columns. Returns: DataManager containing quantiles of original DataManager along an axis. """
if self._is_transposed: kwargs["axis"] = kwargs.get("axis", 0) ^ 1 return self.transpose().quantile_for_list_of_values(**kwargs) axis = kwargs.get("axis", 0) q = kwargs.get("q") numeric_only = kwargs.get("numeric_only", True) assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list)) if numeric_only: new_columns = self.numeric_columns() else: new_columns = [ col for col, dtype in zip(self.columns, self.dtypes) if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype)) ] if axis: # If along rows, then drop the nonnumeric columns, record the index, and # take transpose. We have to do this because if we don't, the result is all # in one column for some reason. nonnumeric = [ col for col, dtype in zip(self.columns, self.dtypes) if not is_numeric_dtype(dtype) ] query_compiler = self.drop(columns=nonnumeric) new_columns = query_compiler.index else: query_compiler = self def quantile_builder(df, **kwargs): result = df.quantile(**kwargs) return result.T if axis == 1 else result func = query_compiler._prepare_method(quantile_builder, **kwargs) q_index = pandas.Float64Index(q) new_data = query_compiler._map_across_full_axis(axis, func) # This took a long time to debug, so here is the rundown of why this is needed. # Previously, we were operating on select indices, but that was broken. We were # not correctly setting the columns/index. Because of how we compute `to_pandas` # and because of the static nature of the index for `axis=1` it is easier to # just handle this as the transpose (see `quantile_builder` above for the # transpose within the partition) than it is to completely rework other # internal methods. Basically we are returning the transpose of the object for # correctness and cleanliness of the code. if axis == 1: q_index = new_columns new_columns = pandas.Float64Index(q) result = self.__constructor__(new_data, q_index, new_columns) return result.transpose() if axis == 1 else result
<SYSTEM_TASK:> Returns the last n rows. <END_TASK> <USER_TASK:> Description: def tail(self, n): """Returns the last n rows. Args: n: Integer containing the number of rows to return. Returns: DataManager containing the last n rows of the original DataManager. """
# See head for an explanation of the transposed behavior if n < 0: n = max(0, len(self.index) + n) if self._is_transposed: result = self.__constructor__( self.data.transpose().take(1, -n).transpose(), self.index[-n:], self.columns, self._dtype_cache, ) result._is_transposed = True else: result = self.__constructor__( self.data.take(0, -n), self.index[-n:], self.columns, self._dtype_cache ) return result
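The negative-n branch mirrors pandas semantics, where tail(-n) drops the first n rows; a quick pandas-only check of the conversion used above:
import pandas

df = pandas.DataFrame({"a": range(5)})
n = -2
converted = max(0, len(df.index) + n)        # -> 3
assert df.tail(n).equals(df.tail(converted))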
<SYSTEM_TASK:> Returns the first n columns. <END_TASK> <USER_TASK:> Description: def front(self, n): """Returns the first n columns. Args: n: Integer containing the number of columns to return. Returns: DataManager containing the first n columns of the original DataManager. """
new_dtypes = ( self._dtype_cache if self._dtype_cache is None else self._dtype_cache[:n] ) # See head for an explanation of the transposed behavior if self._is_transposed: result = self.__constructor__( self.data.transpose().take(0, n).transpose(), self.index, self.columns[:n], new_dtypes, ) result._is_transposed = True else: result = self.__constructor__( self.data.take(1, n), self.index, self.columns[:n], new_dtypes ) return result
<SYSTEM_TASK:> Get column data for target labels. <END_TASK> <USER_TASK:> Description: def getitem_column_array(self, key): """Get column data for target labels. Args: key: Target labels by which to retrieve data. Returns: A new QueryCompiler. """
# Convert to list for type checking numeric_indices = list(self.columns.get_indexer_for(key)) # Internal indices is left blank and the internal # `apply_func_to_select_indices` will do the conversion and pass it in. def getitem(df, internal_indices=[]): return df.iloc[:, internal_indices] result = self.data.apply_func_to_select_indices( 0, getitem, numeric_indices, keep_remaining=False ) # We can't just set the columns to key here because there may be # multiple instances of a key. new_columns = self.columns[numeric_indices] new_dtypes = self.dtypes[numeric_indices] return self.__constructor__(result, self.index, new_columns, new_dtypes)
<SYSTEM_TASK:> Get row data for target labels. <END_TASK> <USER_TASK:> Description: def getitem_row_array(self, key): """Get row data for target labels. Args: key: Target numeric indices by which to retrieve data. Returns: A new QueryCompiler. """
# Convert to list for type checking key = list(key) def getitem(df, internal_indices=[]): return df.iloc[internal_indices] result = self.data.apply_func_to_select_indices( 1, getitem, key, keep_remaining=False ) # We can't just set the index to key here because there may be multiple # instances of a key. new_index = self.index[key] return self.__constructor__(result, new_index, self.columns, self._dtype_cache)
<SYSTEM_TASK:> Set the column defined by `key` to the `value` provided. <END_TASK> <USER_TASK:> Description: def setitem(self, axis, key, value): """Set the column defined by `key` to the `value` provided. Args: key: The column name to set. value: The value to set the column to. Returns: A new QueryCompiler """
def setitem(df, internal_indices=[]): def _setitem(): if len(internal_indices) == 1: if axis == 0: df[df.columns[internal_indices[0]]] = value else: df.iloc[internal_indices[0]] = value else: if axis == 0: df[df.columns[internal_indices]] = value else: df.iloc[internal_indices] = value try: _setitem() except ValueError: # TODO: This is a workaround for a pyarrow serialization issue df = df.copy() _setitem() return df if axis == 0: numeric_indices = list(self.columns.get_indexer_for([key])) else: numeric_indices = list(self.index.get_indexer_for([key])) prepared_func = self._prepare_method(setitem) if is_list_like(value): new_data = self.data.apply_func_to_select_indices_along_full_axis( axis, prepared_func, numeric_indices, keep_remaining=True ) else: new_data = self.data.apply_func_to_select_indices( axis, prepared_func, numeric_indices, keep_remaining=True ) return self.__constructor__(new_data, self.index, self.columns)
<SYSTEM_TASK:> Remove row data for target index and columns. <END_TASK> <USER_TASK:> Description: def drop(self, index=None, columns=None): """Remove row data for target index and columns. Args: index: Target index to drop. columns: Target columns to drop. Returns: A new QueryCompiler. """
if self._is_transposed: return self.transpose().drop(index=columns, columns=index).transpose() if index is None: new_data = self.data new_index = self.index else: def delitem(df, internal_indices=[]): return df.drop(index=df.index[internal_indices]) numeric_indices = list(self.index.get_indexer_for(index)) new_data = self.data.apply_func_to_select_indices( 1, delitem, numeric_indices, keep_remaining=True ) # We can't use self.index.drop with duplicate keys because in Pandas # it throws an error. new_index = self.index[~self.index.isin(index)] if columns is None: new_columns = self.columns new_dtypes = self.dtypes else: def delitem(df, internal_indices=[]): return df.drop(columns=df.columns[internal_indices]) numeric_indices = list(self.columns.get_indexer_for(columns)) new_data = new_data.apply_func_to_select_indices( 0, delitem, numeric_indices, keep_remaining=True ) new_columns = self.columns[~self.columns.isin(columns)] new_dtypes = self.dtypes.drop(columns) return self.__constructor__(new_data, new_index, new_columns, new_dtypes)
<SYSTEM_TASK:> Insert new column data. <END_TASK> <USER_TASK:> Description: def insert(self, loc, column, value): """Insert new column data. Args: loc: Insertion index. column: Column labels to insert. value: Dtype object values to insert. Returns: A new PandasQueryCompiler with new data inserted. """
if is_list_like(value): # TODO make work with another querycompiler object as `value`. # This will require aligning the indices with a `reindex` and ensuring that # the data is partitioned identically. if isinstance(value, pandas.Series): value = value.reindex(self.index) value = list(value) def insert(df, internal_indices=[]): internal_idx = int(internal_indices[0]) old_index = df.index df.index = pandas.RangeIndex(len(df.index)) df.insert(internal_idx, internal_idx, value, allow_duplicates=True) df.columns = pandas.RangeIndex(len(df.columns)) df.index = old_index return df new_data = self.data.apply_func_to_select_indices_along_full_axis( 0, insert, loc, keep_remaining=True ) new_columns = self.columns.insert(loc, column) return self.__constructor__(new_data, self.index, new_columns)
<SYSTEM_TASK:> Apply func across given axis. <END_TASK> <USER_TASK:> Description: def apply(self, func, axis, *args, **kwargs): """Apply func across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """
if callable(func): return self._callable_func(func, axis, *args, **kwargs) elif isinstance(func, dict): return self._dict_func(func, axis, *args, **kwargs) elif is_list_like(func): return self._list_like_func(func, axis, *args, **kwargs) else: pass
<SYSTEM_TASK:> Recompute the index after applying function. <END_TASK> <USER_TASK:> Description: def _post_process_apply(self, result_data, axis, try_scale=True): """Recompute the index after applying function. Args: result_data: a BaseFrameManager object. axis: Target axis along which function was applied. Returns: A new PandasQueryCompiler. """
if try_scale: try: internal_index = self.compute_index(0, result_data, True) except IndexError: internal_index = self.compute_index(0, result_data, False) try: internal_columns = self.compute_index(1, result_data, True) except IndexError: internal_columns = self.compute_index(1, result_data, False) else: internal_index = self.compute_index(0, result_data, False) internal_columns = self.compute_index(1, result_data, False) if not axis: index = internal_index # We check if the two columns are the same length because if # they are the same length, `self.columns` is the correct index. # However, if the operation resulted in a different number of columns, # we must use the derived columns from `self.compute_index()`. if len(internal_columns) != len(self.columns): columns = internal_columns else: columns = self.columns else: columns = internal_columns # See above explanation for checking the lengths of columns if len(internal_index) != len(self.index): index = internal_index else: index = self.index return self.__constructor__(result_data, index, columns)
<SYSTEM_TASK:> Apply function to certain indices across given axis. <END_TASK> <USER_TASK:> Description: def _dict_func(self, func, axis, *args, **kwargs): """Apply function to certain indices across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """
if "axis" not in kwargs: kwargs["axis"] = axis if axis == 0: index = self.columns else: index = self.index func = {idx: func[key] for key in func for idx in index.get_indexer_for([key])} def dict_apply_builder(df, func_dict={}): # Sometimes `apply` can return a `Series`, but we require that internally # all objects are `DataFrame`s. return pandas.DataFrame(df.apply(func_dict, *args, **kwargs)) result_data = self.data.apply_func_to_select_indices_along_full_axis( axis, dict_apply_builder, func, keep_remaining=False ) full_result = self._post_process_apply(result_data, axis) return full_result
<SYSTEM_TASK:> Apply list-like function across given axis. <END_TASK> <USER_TASK:> Description: def _list_like_func(self, func, axis, *args, **kwargs): """Apply list-like function across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """
func_prepared = self._prepare_method( lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)) ) new_data = self._map_across_full_axis(axis, func_prepared) # When the function is list-like, the function names become the index/columns new_index = ( [f if isinstance(f, string_types) else f.__name__ for f in func] if axis == 0 else self.index ) new_columns = ( [f if isinstance(f, string_types) else f.__name__ for f in func] if axis == 1 else self.columns ) return self.__constructor__(new_data, new_index, new_columns)
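A pandas-only sketch of why the new labels are the function names: applying a list of functions yields a frame indexed by those names (toy data assumed):
import pandas

df = pandas.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
result = df.apply(["sum", "mean"])
print(result.index.tolist())  # ['sum', 'mean'] -> becomes the new index when axis=0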
<SYSTEM_TASK:> Apply callable functions across given axis. <END_TASK> <USER_TASK:> Description: def _callable_func(self, func, axis, *args, **kwargs): """Apply callable functions across given axis. Args: func: The functions to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler. """
def callable_apply_builder(df, axis=0): if not axis: df.index = index df.columns = pandas.RangeIndex(len(df.columns)) else: df.columns = index df.index = pandas.RangeIndex(len(df.index)) result = df.apply(func, axis=axis, *args, **kwargs) return result index = self.index if not axis else self.columns func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis) result_data = self._map_across_full_axis(axis, func_prepared) return self._post_process_apply(result_data, axis)
<SYSTEM_TASK:> This method applies all manual partitioning functions. <END_TASK> <USER_TASK:> Description: def _manual_repartition(self, axis, repartition_func, **kwargs): """This method applies all manual partitioning functions. Args: axis: The axis to shuffle data along. repartition_func: The function used to repartition data. Returns: A `BaseFrameManager` object. """
func = self._prepare_method(repartition_func, **kwargs) return self.data.manual_shuffle(axis, func)
<SYSTEM_TASK:> Convert categorical variables to dummy variables for certain columns. <END_TASK> <USER_TASK:> Description: def get_dummies(self, columns, **kwargs): """Convert categorical variables to dummy variables for certain columns. Args: columns: The columns to convert. Returns: A new QueryCompiler. """
cls = type(self) # `columns` as None does not mean all columns, by default it means only # non-numeric columns. if columns is None: columns = [c for c in self.columns if not is_numeric_dtype(self.dtypes[c])] # If we aren't computing any dummies, there is no need for any # remote compute. if len(columns) == 0: return self.copy() elif not is_list_like(columns): columns = [columns] # We have to do one of two things in order to ensure the final columns # are correct. Our first option is to map over the data and assign the # columns in a separate pass. That is what we have chosen to do here. # This is not as efficient, but it requires less information from the # lower layers and does not break any of our internal requirements. The # second option is that we assign the columns as a part of the # `get_dummies` call. This requires knowledge of the length of each # partition, and breaks some of our assumptions and separation of # concerns. def set_columns(df, columns): df.columns = columns return df set_cols = self.columns columns_applied = self._map_across_full_axis( 1, lambda df: set_columns(df, set_cols) ) # In some cases, we are mapping across all of the data. It is more # efficient if we are mapping over all of the data to do it this way # than it would be to reuse the code for specific columns. if len(columns) == len(self.columns): def get_dummies_builder(df): if df is not None: if not df.empty: return pandas.get_dummies(df, **kwargs) else: return pandas.DataFrame([]) func = self._prepare_method(lambda df: get_dummies_builder(df)) new_data = columns_applied.map_across_full_axis(0, func) untouched_data = None else: def get_dummies_builder(df, internal_indices=[]): return pandas.get_dummies( df.iloc[:, internal_indices], columns=None, **kwargs ) numeric_indices = list(self.columns.get_indexer_for(columns)) new_data = columns_applied.apply_func_to_select_indices_along_full_axis( 0, get_dummies_builder, numeric_indices, keep_remaining=False ) untouched_data = self.drop(columns=columns) # Since we set the columns in the beginning, we can just extract them # here. There is fortunately no required extra steps for a correct # column index. final_columns = self.compute_index(1, new_data, False) # If we mapped over all the data we are done. If not, we need to # prepend the `new_data` with the raw data from the columns that were # not selected. if len(columns) != len(self.columns): new_data = untouched_data.data.concat(1, new_data) final_columns = untouched_data.columns.append(pandas.Index(final_columns)) return cls(new_data, self.index, final_columns)
<SYSTEM_TASK:> Helper method to check validity of other in inter-df operations <END_TASK> <USER_TASK:> Description: def _validate_other( self, other, axis, numeric_only=False, numeric_or_time_only=False, numeric_or_object_only=False, comparison_dtypes_only=False, ): """Helper method to check validity of other in inter-df operations"""
axis = self._get_axis_number(axis) if axis is not None else 1 result = other if isinstance(other, BasePandasDataset): return other._query_compiler elif is_list_like(other): if axis == 0: if len(other) != len(self._query_compiler.index): raise ValueError( "Unable to coerce to Series, length must be {0}: " "given {1}".format(len(self._query_compiler.index), len(other)) ) else: if len(other) != len(self._query_compiler.columns): raise ValueError( "Unable to coerce to Series, length must be {0}: " "given {1}".format( len(self._query_compiler.columns), len(other) ) ) if hasattr(other, "dtype"): other_dtypes = [other.dtype] * len(other) else: other_dtypes = [type(x) for x in other] else: other_dtypes = [ type(other) for _ in range( len(self._query_compiler.index) if axis else len(self._query_compiler.columns) ) ] # Do dtype checking if numeric_only: if not all( is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError("Cannot do operation on non-numeric dtypes") elif numeric_or_object_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or (is_object_dtype(self_dtype) and is_object_dtype(other_dtype)) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError("Cannot do operation non-numeric dtypes") elif comparison_dtypes_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or ( is_datetime_or_timedelta_dtype(self_dtype) and is_datetime_or_timedelta_dtype(other_dtype) ) or is_dtype_equal(self_dtype, other_dtype) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError( "Cannot do operation non-numeric objects with numeric objects" ) elif numeric_or_time_only: if not all( (is_numeric_dtype(self_dtype) and is_numeric_dtype(other_dtype)) or ( is_datetime_or_timedelta_dtype(self_dtype) and is_datetime_or_timedelta_dtype(other_dtype) ) for self_dtype, other_dtype in zip(self._get_dtypes(), other_dtypes) ): raise TypeError( "Cannot do operation non-numeric objects with numeric objects" ) return result
<SYSTEM_TASK:> Return the bool of a single element PandasObject. <END_TASK> <USER_TASK:> Description: def bool(self): """Return the bool of a single element PandasObject. This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that element is not boolean """
shape = self.shape if shape != (1,) and shape != (1, 1): raise ValueError( """The PandasObject does not have exactly 1 element. Return the bool of a single element PandasObject. The truth value is ambiguous. Use a.empty, a.item(), a.any() or a.all().""" ) else: return self._to_pandas().bool()
<SYSTEM_TASK:> Flushes the call_queue and returns the data. <END_TASK> <USER_TASK:> Description: def get(self): """Flushes the call_queue and returns the data. Note: Since this object is a simple wrapper, just return the data. Returns: The object that was `put`. """
if self.call_queue: return self.apply(lambda df: df).data else: return self.data.copy()
<SYSTEM_TASK:> Add the function to the apply function call stack. <END_TASK> <USER_TASK:> Description: def add_to_apply_calls(self, func, **kwargs): """Add the function to the apply function call stack. This function will be executed when apply is called. Functions are executed in the order they were inserted; the function passed to `apply` runs last, and its result is returned. """
import dask self.delayed_call = dask.delayed(func)(self.delayed_call, **kwargs) return self
<SYSTEM_TASK:> A memory efficient way to get a block of NaNs. <END_TASK> <USER_TASK:> Description: def _get_nan_block_id(partition_class, n_row=1, n_col=1, transpose=False): """A memory efficient way to get a block of NaNs. Args: partition_class (BaseFramePartition): The class to use to put the object in the remote format. n_row(int): The number of rows. n_col(int): The number of columns. transpose(bool): If true, swap rows and columns. Returns: ObjectID of the NaN block. """
global _NAN_BLOCKS if transpose: n_row, n_col = n_col, n_row shape = (n_row, n_col) if shape not in _NAN_BLOCKS: arr = np.tile(np.array(np.NaN), shape) # TODO Not use pandas.DataFrame here, but something more general. _NAN_BLOCKS[shape] = partition_class.put(pandas.DataFrame(data=arr)) return _NAN_BLOCKS[shape]
<SYSTEM_TASK:> Split the Pandas result evenly based on the provided number of splits. <END_TASK> <USER_TASK:> Description: def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None): """Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames. """
if num_splits == 1: return result if length_list is not None: length_list.insert(0, 0) sums = np.cumsum(length_list) if axis == 0: return [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] else: return [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] # We do this to restore block partitioning chunksize = compute_chunksize(result, num_splits, axis=axis) if axis == 0: return [ result.iloc[chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ] else: return [ result.iloc[:, chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ]
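A minimal sketch of the `length_list` branch above, applied to a small hypothetical frame along axis 0; the cumulative sums give the row boundaries of each split.

import numpy as np
import pandas

result = pandas.DataFrame({"a": range(10)})
length_list = [0, 4, 3, 3]              # leading 0 inserted as in the code above
sums = np.cumsum(length_list)           # [0, 4, 7, 10]
parts = [result.iloc[sums[i]:sums[i + 1]] for i in range(len(sums) - 1)]
print([len(p) for p in parts])          # [4, 3, 3]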
<SYSTEM_TASK:> Unpack the user input for getitem and setitem and compute ndim <END_TASK> <USER_TASK:> Description: def _parse_tuple(tup): """Unpack the user input for getitem and setitem and compute ndim loc[a] -> ([a], :), 1D loc[[a,b],] -> ([a,b], :), 2D loc[a,b] -> ([a], [b]), 0D """
row_loc, col_loc = slice(None), slice(None) if is_tuple(tup): row_loc = tup[0] if len(tup) == 2: col_loc = tup[1] if len(tup) > 2: raise IndexingError("Too many indexers") else: row_loc = tup ndim = _compute_ndim(row_loc, col_loc) row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) row_loc = [row_loc] if row_scaler else row_loc col_loc = [col_loc] if col_scaler else col_loc return row_loc, col_loc, ndim, row_scaler, col_scaler
<SYSTEM_TASK:> Determine if a locator will enlarge the global index. <END_TASK> <USER_TASK:> Description: def _is_enlargement(locator, global_index): """Determine if a locator will enlarge the global index. Enlargement happens when you try to locate using labels that aren't in the original index. In other words, enlargement == adding NaNs! """
if ( is_list_like(locator) and not is_slice(locator) and len(locator) > 0 and not is_boolean_array(locator) and (isinstance(locator, type(global_index[0])) and locator not in global_index) ): n_diff_elems = len(pandas.Index(locator).difference(global_index)) is_enlargement_boolean = n_diff_elems > 0 return is_enlargement_boolean return False
<SYSTEM_TASK:> Compute the ndim of result from locators <END_TASK> <USER_TASK:> Description: def _compute_ndim(row_loc, col_loc): """Compute the ndim of result from locators """
row_scaler = is_scalar(row_loc) col_scaler = is_scalar(col_loc) if row_scaler and col_scaler: ndim = 0 elif row_scaler ^ col_scaler: ndim = 1 else: ndim = 2 return ndim
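The ndim rules above (and the docstring cases of `_parse_tuple`) can be checked with a local copy of the same scalar test; this is just an illustration, not the production path.

from pandas.api.types import is_scalar

def compute_ndim(row_loc, col_loc):
    row_scaler, col_scaler = is_scalar(row_loc), is_scalar(col_loc)
    if row_scaler and col_scaler:
        return 0
    if row_scaler ^ col_scaler:
        return 1
    return 2

assert compute_ndim("a", "b") == 0                  # loc[a, b]
assert compute_ndim("a", slice(None)) == 1          # loc[a]
assert compute_ndim(["a", "b"], slice(None)) == 2   # loc[[a, b],]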
<SYSTEM_TASK:> Use numpy to broadcast or reshape item. <END_TASK> <USER_TASK:> Description: def _broadcast_item(self, row_lookup, col_lookup, item, to_shape): """Use numpy to broadcast or reshape item. Notes: - Numpy is memory efficient; there shouldn't be a performance issue. """
# It is valid to pass a DataFrame or Series to __setitem__ that is larger than # the target the user is trying to overwrite. This if isinstance(item, (pandas.Series, pandas.DataFrame, DataFrame)): if not all(idx in item.index for idx in row_lookup): raise ValueError( "Must have equal len keys and value when setting with " "an iterable" ) if hasattr(item, "columns"): if not all(idx in item.columns for idx in col_lookup): raise ValueError( "Must have equal len keys and value when setting " "with an iterable" ) item = item.reindex(index=row_lookup, columns=col_lookup) else: item = item.reindex(index=row_lookup) try: item = np.array(item) if np.prod(to_shape) == np.prod(item.shape): return item.reshape(to_shape) else: return np.broadcast_to(item, to_shape) except ValueError: from_shape = np.array(item).shape raise ValueError( "could not broadcast input array from shape {from_shape} into shape " "{to_shape}".format(from_shape=from_shape, to_shape=to_shape) )
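The reshape-or-broadcast decision at the end of the method can be shown on plain numpy arrays, with no partition machinery involved; the helper name below is hypothetical.

import numpy as np

def fit_item(item, to_shape):
    item = np.array(item)
    if np.prod(to_shape) == np.prod(item.shape):
        return item.reshape(to_shape)           # same element count: reshape
    return np.broadcast_to(item, to_shape)      # otherwise broadcast (e.g. a scalar)

print(fit_item([1, 2, 3, 4], (2, 2)))           # [[1 2] [3 4]]
print(fit_item(7, (2, 3)))                      # a 2x3 block of 7s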
<SYSTEM_TASK:> Perform remote write and replace blocks. <END_TASK> <USER_TASK:> Description: def _write_items(self, row_lookup, col_lookup, item): """Perform remote write and replace blocks. """
self.qc.write_items(row_lookup, col_lookup, item)
<SYSTEM_TASK:> Helper for _enlarge_axis, compute common labels and extra labels. <END_TASK> <USER_TASK:> Description: def _compute_enlarge_labels(self, locator, base_index): """Helper for _enlarge_axis, compute common labels and extra labels. Returns: nan_labels: The labels that need to be added """
# base_index_type can be pd.Index or pd.DatetimeIndex # depending on user input and pandas behavior # See issue #2264 base_index_type = type(base_index) locator_as_index = base_index_type(locator) nan_labels = locator_as_index.difference(base_index) common_labels = locator_as_index.intersection(base_index) if len(common_labels) == 0: raise KeyError( "None of [{labels}] are in the [{base_index_name}]".format( labels=list(locator_as_index), base_index_name=base_index ) ) return nan_labels
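The set arithmetic above maps directly onto pandas Index methods; a short illustration with made-up labels:

import pandas

base_index = pandas.Index(["a", "b", "c"])
locator = pandas.Index(["b", "c", "d"])
print(locator.difference(base_index).tolist())    # ['d'] -> labels to enlarge with NaN
print(locator.intersection(base_index).tolist())  # ['b', 'c'] -> labels that already exist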
<SYSTEM_TASK:> Splits the DataFrame read into smaller DataFrames and handles all edge cases. <END_TASK> <USER_TASK:> Description: def _split_result_for_readers(axis, num_splits, df): # pragma: no cover """Splits the DataFrame read into smaller DataFrames and handles all edge cases. Args: axis: Which axis to split over. num_splits: The number of splits to create. df: The DataFrame after it has been read. Returns: A list of pandas DataFrames. """
splits = split_result_of_axis_func_pandas(axis, num_splits, df) if not isinstance(splits, list): splits = [splits] return splits
<SYSTEM_TASK:> Use a Ray task to read columns from Parquet into a Pandas DataFrame. <END_TASK> <USER_TASK:> Description: def _read_parquet_columns(path, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from Parquet into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Parquet file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """
import pyarrow.parquet as pq df = pq.read_pandas(path, columns=columns, **kwargs).to_pandas() # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
<SYSTEM_TASK:> Use a Ray task to read a chunk of a CSV into a Pandas DataFrame. <END_TASK> <USER_TASK:> Description: def _read_csv_with_offset_pandas_on_ray( fname, num_splits, start, end, kwargs, header ): # pragma: no cover """Use a Ray task to read a chunk of a CSV into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: fname: The filename of the file to open. num_splits: The number of splits (partitions) to separate the DataFrame into. start: The start byte offset. end: The end byte offset. kwargs: The kwargs for the Pandas `read_csv` function. header: The header of the file. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """
index_col = kwargs.get("index_col", None) bio = file_open(fname, "rb") bio.seek(start) to_read = header + bio.read(end - start) bio.close() pandas_df = pandas.read_csv(BytesIO(to_read), **kwargs) pandas_df.columns = pandas.RangeIndex(len(pandas_df.columns)) if index_col is not None: index = pandas_df.index # Partitions must have RangeIndex pandas_df.index = pandas.RangeIndex(0, len(pandas_df)) else: # We will use the lengths to build the index if we are not given an # `index_col`. index = len(pandas_df) return _split_result_for_readers(1, num_splits, pandas_df) + [index]
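The header-plus-byte-window trick above can be reproduced locally without Ray or a real file handle; the offsets below are hand-computed for a tiny in-memory CSV.

import io
import pandas

csv_bytes = b"a,b\n1,2\n3,4\n5,6\n"
header = csv_bytes[:4]                  # b"a,b\n"
start, end = 8, 16                      # byte window covering the last two rows
chunk = header + csv_bytes[start:end]
df = pandas.read_csv(io.BytesIO(chunk))
print(df.values.tolist())               # [[3, 4], [5, 6]]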
<SYSTEM_TASK:> Use a Ray task to read columns from HDF5 into a Pandas DataFrame. <END_TASK> <USER_TASK:> Description: def _read_hdf_columns(path_or_buf, columns, num_splits, kwargs): # pragma: no cover """Use a Ray task to read columns from HDF5 into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path_or_buf: The path of the HDF5 file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """
df = pandas.read_hdf(path_or_buf, columns=columns, **kwargs) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
<SYSTEM_TASK:> Use a Ray task to read columns from Feather into a Pandas DataFrame. <END_TASK> <USER_TASK:> Description: def _read_feather_columns(path, columns, num_splits): # pragma: no cover """Use a Ray task to read columns from Feather into a Pandas DataFrame. Note: Ray functions are not detected by codecov (thus pragma: no cover) Args: path: The path of the Feather file. columns: The list of column names to read. num_splits: The number of partitions to split the column into. Returns: A list containing the split Pandas DataFrames and the Index as the last element. If there is not `index_col` set, then we just return the length. This is used to determine the total length of the DataFrame to build a default Index. """
from pyarrow import feather df = feather.read_feather(path, columns=columns) # Append the length of the index here to build it externally return _split_result_for_readers(0, num_splits, df) + [len(df.index)]
<SYSTEM_TASK:> Get the index from the indices returned by the workers. <END_TASK> <USER_TASK:> Description: def get_index(index_name, *partition_indices): # pragma: no cover """Get the index from the indices returned by the workers. Note: Ray functions are not detected by codecov (thus pragma: no cover)"""
index = partition_indices[0].append(partition_indices[1:]) index.names = index_name return index
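The reassembly above is a plain `Index.append`; for example, on RangeIndex pieces:

import pandas

parts = [pandas.RangeIndex(0, 3), pandas.RangeIndex(3, 5), pandas.RangeIndex(5, 9)]
full = parts[0].append(parts[1:])
print(list(full))   # [0, 1, 2, 3, 4, 5, 6, 7, 8]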
<SYSTEM_TASK:> Load a h5 file from the file path or buffer, returning a DataFrame. <END_TASK> <USER_TASK:> Description: def read_hdf(cls, path_or_buf, **kwargs): """Load a h5 file from the file path or buffer, returning a DataFrame. Args: path_or_buf: string, buffer or path object Path to the file to open, or an open :class:`pandas.HDFStore` object. kwargs: Pass into pandas.read_hdf function. Returns: DataFrame constructed from the h5 file. """
if cls.read_hdf_remote_task is None: return super(RayIO, cls).read_hdf(path_or_buf, **kwargs) format = cls._validate_hdf_format(path_or_buf=path_or_buf) if format is None: ErrorMessage.default_to_pandas( "File format seems to be `fixed`. For better distribution consider saving the file in `table` format. " "df.to_hdf(format=`table`)." ) return cls.from_pandas(pandas.read_hdf(path_or_buf=path_or_buf, **kwargs)) columns = kwargs.get("columns", None) if not columns: empty_pd_df = pandas.read_hdf(path_or_buf, start=0, stop=0) columns = empty_pd_df.columns num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_hdf_remote_task._remote( args=(path_or_buf, cols, num_splits, kwargs), num_return_vals=num_splits + 1, ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
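The column-chunking arithmetic in the middle of the method is easy to check in isolation; here it is for a hypothetical 10-column frame and 4 partitions.

columns = list("abcdefghij")
num_partitions = 4
column_splits = (
    len(columns) // num_partitions
    if len(columns) % num_partitions == 0
    else len(columns) // num_partitions + 1
)
col_partitions = [
    columns[i:i + column_splits] for i in range(0, len(columns), column_splits)
]
print(col_partitions)   # [['a', 'b', 'c'], ['d', 'e', 'f'], ['g', 'h', 'i'], ['j']]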
<SYSTEM_TASK:> Read a pandas.DataFrame from Feather format. <END_TASK> <USER_TASK:> Description: def read_feather(cls, path, columns=None, use_threads=True): """Read a pandas.DataFrame from Feather format. Ray DataFrame only supports the pyarrow engine for now. Args: path: The filepath of the feather file. Only local files are supported for now. columns: not supported by the pandas api, but can be passed here to read only specific columns use_threads: Whether or not to use threads when reading; set to True by default Notes: pyarrow feather is used. Please refer to the documentation here https://arrow.apache.org/docs/python/api.html#feather-format """
if cls.read_feather_remote_task is None: return super(RayIO, cls).read_feather( path, columns=columns, use_threads=use_threads ) if columns is None: from pyarrow.feather import FeatherReader fr = FeatherReader(path) columns = [fr.get_column_name(i) for i in range(fr.num_columns)] num_partitions = cls.frame_mgr_cls._compute_num_partitions() num_splits = min(len(columns), num_partitions) # Each item in this list will be a list of column names of the original df column_splits = ( len(columns) // num_partitions if len(columns) % num_partitions == 0 else len(columns) // num_partitions + 1 ) col_partitions = [ columns[i : i + column_splits] for i in range(0, len(columns), column_splits) ] blk_partitions = np.array( [ cls.read_feather_remote_task._remote( args=(path, cols, num_splits), num_return_vals=num_splits + 1 ) for cols in col_partitions ] ).T remote_partitions = np.array( [ [cls.frame_partition_cls(obj) for obj in row] for row in blk_partitions[:-1] ] ) index_len = ray.get(blk_partitions[-1][0]) index = pandas.RangeIndex(index_len) new_query_compiler = cls.query_compiler_cls( cls.frame_mgr_cls(remote_partitions), index, columns ) return new_query_compiler
<SYSTEM_TASK:> Convert the arg to datetime format. If not Ray DataFrame, this falls <END_TASK> <USER_TASK:> Description: def to_datetime( arg, errors="raise", dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin="unix", cache=False, ): """Convert the arg to datetime format. If not Ray DataFrame, this falls back on pandas. Args: errors ('raise' or 'ignore'): If 'ignore', errors are silenced. Pandas blatantly ignores this argument so we will too. dayfirst (bool): Date format is passed in as day first. yearfirst (bool): Date format is passed in as year first. utc (bool): returns a UTC DatetimeIndex if True. box (bool): If True, returns a DatetimeIndex. format (string): strftime to parse time, e.g. "%d/%m/%Y". exact (bool): If True, require an exact format match. unit (string, default 'ns'): unit of the arg. infer_datetime_format (bool): Whether or not to infer the format. origin (string): Define the reference date. Returns: Type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp """
if not isinstance(arg, DataFrame): return pandas.to_datetime( arg, errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache, ) # Pandas seems to ignore this kwarg so we will too pandas.to_datetime( pandas.DataFrame(columns=arg.columns), errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache, ) return arg._query_compiler.to_datetime()
<SYSTEM_TASK:> Copartition two BlockPartitions objects. <END_TASK> <USER_TASK:> Description: def copartition_datasets(self, axis, other, left_func, right_func): """Copartition two BlockPartitions objects. Args: axis: The axis to copartition. other: The other BlockPartitions object to copartition with. left_func: The function to apply to left. If None, just use the dimension of self (based on axis). right_func: The function to apply to right. If None, check the dimensions of other and use the identity function if splitting needs to happen. Returns: A tuple of BlockPartitions objects, left and right. """
if left_func is None: new_self = self else: new_self = self.map_across_full_axis(axis, left_func) # This block of code will only shuffle if absolutely necessary. If we do need to # shuffle, we use the identity function and then reshuffle. if right_func is None: if axis == 0 and not np.array_equal( other.block_lengths, new_self.block_lengths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_lengths ) elif axis == 1 and not np.array_equal( other.block_widths, new_self.block_widths ): new_other = other.manual_shuffle( axis, lambda x: x, new_self.block_widths ) else: new_other = other # Most of the time, we will be given an operation to do. We perform that with # manual_shuffle. else: new_other = other.manual_shuffle( axis, right_func, new_self.block_lengths if axis == 0 else new_self.block_widths, ) return new_self, new_other
<SYSTEM_TASK:> Concatenate the blocks with another set of blocks. <END_TASK> <USER_TASK:> Description: def concat(self, axis, other_blocks): """Concatenate the blocks with another set of blocks. Note: Assumes that the blocks are already the same shape on the dimension being concatenated. A ValueError will be thrown if this condition is not met. Args: axis: The axis to concatenate to. other_blocks: the other blocks to be concatenated. This is a BaseFrameManager object. Returns: A new BaseFrameManager object, the type of object that called this. """
if type(other_blocks) is list: other_blocks = [blocks.partitions for blocks in other_blocks] return self.__constructor__( np.concatenate([self.partitions] + other_blocks, axis=axis) ) else: return self.__constructor__( np.append(self.partitions, other_blocks.partitions, axis=axis) )
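The two concatenation paths above (a list of managers versus a single one) reduce to `np.concatenate` and `np.append` on the partition grids; small numeric arrays stand in for those grids here.

import numpy as np

left = np.array([[1, 2], [3, 4]])
right = np.array([[5, 6], [7, 8]])
print(np.append(left, right, axis=1))                 # single other: blocks side by side
print(np.concatenate([left, right, right], axis=0))   # list of others: blocks stacked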
<SYSTEM_TASK:> Convert this object into a Pandas DataFrame from the partitions. <END_TASK> <USER_TASK:> Description: def to_pandas(self, is_transposed=False): """Convert this object into a Pandas DataFrame from the partitions. Args: is_transposed: A flag for telling this object that the external representation is transposed, but not the internal. Returns: A Pandas DataFrame """
# In the case this is transposed, it is easier to just temporarily # transpose back then transpose after the conversion. The performance # is the same as if we individually transposed the blocks and # concatenated them, but the code is much smaller. if is_transposed: return self.transpose().to_pandas(False).T else: retrieved_objects = [ [obj.to_pandas() for obj in part] for part in self.partitions ] if all( isinstance(part, pandas.Series) for row in retrieved_objects for part in row ): axis = 0 elif all( isinstance(part, pandas.DataFrame) for row in retrieved_objects for part in row ): axis = 1 else: ErrorMessage.catch_bugs_and_request_email(True) df_rows = [ pandas.concat([part for part in row], axis=axis) for row in retrieved_objects if not all(part.empty for part in row) ] if len(df_rows) == 0: return pandas.DataFrame() else: return pandas.concat(df_rows)
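A minimal sketch of the reassembly above: each row of blocks is concatenated along columns, and the resulting row frames are stacked. The block contents are made up.

import pandas

blocks = [
    [pandas.DataFrame({"a": [1, 2]}), pandas.DataFrame({"b": [3, 4]})],
    [pandas.DataFrame({"a": [5]}, index=[2]), pandas.DataFrame({"b": [6]}, index=[2])],
]
rows = [pandas.concat(row, axis=1) for row in blocks]
print(pandas.concat(rows))
#    a  b
# 0  1  3
# 1  2  4
# 2  5  6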
<SYSTEM_TASK:> This gets the internal indices stored in the partitions. <END_TASK> <USER_TASK:> Description: def get_indices(self, axis=0, index_func=None, old_blocks=None): """This gets the internal indices stored in the partitions. Note: These are the global indices of the object. This is mostly useful when you have deleted rows/columns internally, but do not know which ones were deleted. Args: axis: The axis to extract the labels from (0 - index, 1 - columns). index_func: The function to be used to extract the index labels. old_blocks: An optional previous object that this object was created from. This is used to compute the correct offsets. Returns: A Pandas Index object. """
ErrorMessage.catch_bugs_and_request_email(not callable(index_func)) func = self.preprocess_func(index_func) if axis == 0: # We grab the first column of blocks and extract the indices # Note: We use _partitions_cache in the context of this function to make # sure that none of the partitions are modified or filtered out before we # get the index information. # DO NOT CHANGE TO self.partitions under any circumstance. new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache.T[0]] if len(self._partitions_cache.T) else [] ) # This is important because sometimes we have resized the data. The new # sizes will not be valid if we are trying to compute the index on a # new object that has a different length. if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_lengths).cumsum() else: cumulative_block_lengths = np.array(self.block_lengths).cumsum() else: new_indices = ( [idx.apply(func).get() for idx in self._partitions_cache[0]] if len(self._partitions_cache) else [] ) if old_blocks is not None: cumulative_block_lengths = np.array(old_blocks.block_widths).cumsum() else: cumulative_block_lengths = np.array(self.block_widths).cumsum() full_indices = new_indices[0] if len(new_indices) else new_indices if old_blocks is not None: for i in range(len(new_indices)): # If the length is 0 there is nothing to append. if i == 0 or len(new_indices[i]) == 0: continue # The try-except here is intended to catch issues where we are # trying to get a string index out of the internal index. try: append_val = new_indices[i] + cumulative_block_lengths[i - 1] except TypeError: append_val = new_indices[i] full_indices = full_indices.append(append_val) else: full_indices = full_indices.append(new_indices[1:]) return full_indices
<SYSTEM_TASK:> Convert a global index to a block index and local index. <END_TASK> <USER_TASK:> Description: def _get_blocks_containing_index(self, axis, index): """Convert a global index to a block index and local index. Note: This method is primarily used to convert a global index into a partition index (along the axis provided) and a local index (useful for `iloc` or similar operations). Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) index: The global index to convert. Returns: A tuple containing (block index and internal index). """
if not axis: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_widths)) cumulative_column_widths = np.array(self.block_widths).cumsum() block_idx = int(np.digitize(index, cumulative_column_widths)) if block_idx == len(cumulative_column_widths): block_idx -= 1 # Compute the internal index based on the previous lengths. This # is a global index, so we must subtract the lengths first. internal_idx = ( index if not block_idx else index - cumulative_column_widths[block_idx - 1] ) else: ErrorMessage.catch_bugs_and_request_email(index > sum(self.block_lengths)) cumulative_row_lengths = np.array(self.block_lengths).cumsum() block_idx = int(np.digitize(index, cumulative_row_lengths)) # See note above about internal index internal_idx = ( index if not block_idx else index - cumulative_row_lengths[block_idx - 1] ) return block_idx, internal_idx
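The digitize-based lookup above can be exercised on a hypothetical row-length layout; the cumulative sums mark where each block ends.

import numpy as np

block_lengths = [4, 4, 2]                         # three row partitions
cumulative = np.array(block_lengths).cumsum()     # [4, 8, 10]

def locate(index):
    block_idx = int(np.digitize(index, cumulative))
    internal = index if not block_idx else index - cumulative[block_idx - 1]
    return block_idx, internal

print(locate(0))   # (0, 0) -- first row of the first block
print(locate(5))   # (1, 1) -- second row of the second block
print(locate(9))   # (2, 1) -- last row of the last block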
<SYSTEM_TASK:> Convert indices to a dict of block index to internal index mapping. <END_TASK> <USER_TASK:> Description: def _get_dict_of_block_index(self, axis, indices, ordered=False): """Convert indices to a dict of block index to internal index mapping. Note: See `_get_blocks_containing_index` for primary usage. This method accepts a list of indices rather than just a single value, and uses `_get_blocks_containing_index`. Args: axis: The axis along which to get the indices (0 - columns, 1 - rows) indices: A list of global indices to convert. Returns For unordered: a dictionary of {block index: list of local indices}. For ordered: a list of tuples mapping block index: list of local indices. """
# Get the internal index and create a dictionary so we only have to # travel to each partition once. all_partitions_and_idx = [ self._get_blocks_containing_index(axis, i) for i in indices ] # In ordered, we have to maintain the order of the list of indices provided. # This means that we need to return a list instead of a dictionary. if ordered: # In ordered, the partitions dict is a list of tuples partitions_dict = [] # This variable is used to store the most recent partition that we added to # the partitions_dict. This allows us to only visit a partition once when we # have multiple values that will be operated on in that partition. last_part = -1 for part_idx, internal_idx in all_partitions_and_idx: if part_idx == last_part: # We append to the list, which is the value part of the tuple. partitions_dict[-1][-1].append(internal_idx) else: # This is where we add new values. partitions_dict.append((part_idx, [internal_idx])) last_part = part_idx else: # For unordered, we can just return a dictionary mapping partition to the # list of indices being operated on. partitions_dict = {} for part_idx, internal_idx in all_partitions_and_idx: if part_idx not in partitions_dict: partitions_dict[part_idx] = [internal_idx] else: partitions_dict[part_idx].append(internal_idx) return partitions_dict
<SYSTEM_TASK:> Applies a function to a list of remote partitions. <END_TASK> <USER_TASK:> Description: def _apply_func_to_list_of_partitions(self, func, partitions, **kwargs): """Applies a function to a list of remote partitions. Note: The main use for this is to preprocess the func. Args: func: The func to apply partitions: The list of partitions Returns: A list of BaseFramePartition objects. """
preprocessed_func = self.preprocess_func(func) return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]
<SYSTEM_TASK:> Applies a function to select indices. <END_TASK> <USER_TASK:> Description: def apply_func_to_select_indices(self, axis, func, indices, keep_remaining=False): """Applies a function to select indices. Note: Your internal function must take a kwarg `internal_indices` for this to work correctly. This prevents information leakage of the internal index to the external representation. Args: axis: The axis to apply the func over. func: The function to apply to these indices. indices: The indices to apply the function to. keep_remaining: Whether or not to keep the other partitions. Some operations may want to drop the remaining partitions and keep only the results. Returns: A new BaseFrameManager object, the type of object that called this. """
if self.partitions.size == 0: return np.array([[]]) # Handling dictionaries has to be done differently, but we still want # to figure out the partitions that need to be applied to, so we will # store the dictionary in a separate variable and assign `indices` to # the keys to handle it the same as we normally would. if isinstance(indices, dict): dict_indices = indices indices = list(indices.keys()) else: dict_indices = None if not isinstance(indices, list): indices = [indices] partitions_dict = self._get_dict_of_block_index( axis, indices, ordered=not keep_remaining ) if not axis: partitions_for_apply = self.partitions.T else: partitions_for_apply = self.partitions # We may have a command to perform different functions on different # columns at the same time. We attempt to handle this as efficiently as # possible here. Functions that use this in the dictionary format must # accept a keyword argument `func_dict`. if dict_indices is not None: def local_to_global_idx(partition_id, local_idx): if partition_id == 0: return local_idx if axis == 0: cumulative_axis = np.cumsum(self.block_widths) else: cumulative_axis = np.cumsum(self.block_lengths) return cumulative_axis[partition_id - 1] + local_idx if not keep_remaining: result = np.array( [ self._apply_func_to_list_of_partitions( func, partitions_for_apply[o_idx], func_dict={ i_idx: dict_indices[local_to_global_idx(o_idx, i_idx)] for i_idx in list_to_apply if i_idx >= 0 }, ) for o_idx, list_to_apply in partitions_dict ] ) else: result = np.array( [ partitions_for_apply[i] if i not in partitions_dict else self._apply_func_to_list_of_partitions( func, partitions_for_apply[i], func_dict={ idx: dict_indices[local_to_global_idx(i, idx)] for idx in partitions_dict[i] if idx >= 0 }, ) for i in range(len(partitions_for_apply)) ] ) else: if not keep_remaining: # We are passing internal indices in here. In order for func to # actually be able to use this information, it must be able to take in # the internal indices. This might mean an iloc in the case of Pandas # or some other way to index into the internal representation. result = np.array( [ self._apply_func_to_list_of_partitions( func, partitions_for_apply[idx], internal_indices=list_to_apply, ) for idx, list_to_apply in partitions_dict ] ) else: # The difference here is that we modify a subset and return the # remaining (non-updated) blocks in their original position. result = np.array( [ partitions_for_apply[i] if i not in partitions_dict else self._apply_func_to_list_of_partitions( func, partitions_for_apply[i], internal_indices=partitions_dict[i], ) for i in range(len(partitions_for_apply)) ] ) return ( self.__constructor__(result.T) if not axis else self.__constructor__(result) )
<SYSTEM_TASK:> Apply a function along both axes <END_TASK> <USER_TASK:> Description: def apply_func_to_indices_both_axis( self, func, row_indices, col_indices, lazy=False, keep_remaining=True, mutate=False, item_to_distribute=None, ): """ Apply a function along both axes Important: For your func to operate directly on the indices provided, it must use `row_internal_indices, col_internal_indices` as keyword arguments. """
if keep_remaining: row_partitions_list = self._get_dict_of_block_index(1, row_indices).items() col_partitions_list = self._get_dict_of_block_index(0, col_indices).items() else: row_partitions_list = self._get_dict_of_block_index( 1, row_indices, ordered=True ) col_partitions_list = self._get_dict_of_block_index( 0, col_indices, ordered=True ) result = np.empty( (len(row_partitions_list), len(col_partitions_list)), dtype=type(self) ) if not mutate: partition_copy = self.partitions.copy() else: partition_copy = self.partitions row_position_counter = 0 for row_idx, row_values in enumerate(row_partitions_list): row_blk_idx, row_internal_idx = row_values col_position_counter = 0 for col_idx, col_values in enumerate(col_partitions_list): col_blk_idx, col_internal_idx = col_values remote_part = partition_copy[row_blk_idx, col_blk_idx] if item_to_distribute is not None: item = item_to_distribute[ row_position_counter : row_position_counter + len(row_internal_idx), col_position_counter : col_position_counter + len(col_internal_idx), ] item = {"item": item} else: item = {} if lazy: block_result = remote_part.add_to_apply_calls( func, row_internal_indices=row_internal_idx, col_internal_indices=col_internal_idx, **item ) else: block_result = remote_part.apply( func, row_internal_indices=row_internal_idx, col_internal_indices=col_internal_idx, **item ) if keep_remaining: partition_copy[row_blk_idx, col_blk_idx] = block_result else: result[row_idx][col_idx] = block_result col_position_counter += len(col_internal_idx) row_position_counter += len(row_internal_idx) if keep_remaining: return self.__constructor__(partition_copy) else: return self.__constructor__(result)
<SYSTEM_TASK:> Apply a function that requires two BaseFrameManager objects. <END_TASK> <USER_TASK:> Description: def inter_data_operation(self, axis, func, other): """Apply a function that requires two BaseFrameManager objects. Args: axis: The axis to apply the function over (0 - rows, 1 - columns) func: The function to apply other: The other BaseFrameManager object to apply func to. Returns: A new BaseFrameManager object, the type of object that called this. """
if axis: partitions = self.row_partitions other_partitions = other.row_partitions else: partitions = self.column_partitions other_partitions = other.column_partitions func = self.preprocess_func(func) result = np.array( [ partitions[i].apply( func, num_splits=self._compute_num_partitions(), other_axis_partition=other_partitions[i], ) for i in range(len(partitions)) ] ) return self.__constructor__(result) if axis else self.__constructor__(result.T)
<SYSTEM_TASK:> Shuffle the partitions based on the `shuffle_func`. <END_TASK> <USER_TASK:> Description: def manual_shuffle(self, axis, shuffle_func, lengths): """Shuffle the partitions based on the `shuffle_func`. Args: axis: The axis to shuffle across. shuffle_func: The function to apply before splitting the result. lengths: The length of each partition to split the result into. Returns: A new BaseFrameManager object, the type of object that called this. """
if axis: partitions = self.row_partitions else: partitions = self.column_partitions func = self.preprocess_func(shuffle_func) result = np.array([part.shuffle(func, lengths) for part in partitions]) return self.__constructor__(result) if axis else self.__constructor__(result.T)
<SYSTEM_TASK:> Creates a parser function from the given sep. <END_TASK> <USER_TASK:> Description: def _make_parser_func(sep): """Creates a parser function from the given sep. Args: sep: The separator default to use for the parser. Returns: A function object. """
def parser_func( filepath_or_buffer, sep=sep, delimiter=None, header="infer", names=None, index_col=None, usecols=None, squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None, engine=None, converters=None, true_values=None, false_values=None, skipinitialspace=False, skiprows=None, nrows=None, na_values=None, keep_default_na=True, na_filter=True, verbose=False, skip_blank_lines=True, parse_dates=False, infer_datetime_format=False, keep_date_col=False, date_parser=None, dayfirst=False, iterator=False, chunksize=None, compression="infer", thousands=None, decimal=b".", lineterminator=None, quotechar='"', quoting=0, escapechar=None, comment=None, encoding=None, dialect=None, tupleize_cols=None, error_bad_lines=True, warn_bad_lines=True, skipfooter=0, doublequote=True, delim_whitespace=False, low_memory=True, memory_map=False, float_precision=None, ): _, _, _, kwargs = inspect.getargvalues(inspect.currentframe()) if not kwargs.get("sep", sep): kwargs["sep"] = "\t" return _read(**kwargs) return parser_func
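Stripped of the full pandas signature, the factory above is a closure that bakes a default separator into the generated reader; a simplified sketch (the names below are illustrative):

import pandas

def make_parser_func(sep):
    def parser_func(filepath_or_buffer, sep=sep, **kwargs):
        return pandas.read_csv(filepath_or_buffer, sep=sep, **kwargs)
    return parser_func

read_csv = make_parser_func(",")     # comma-separated reader
read_table = make_parser_func("\t")  # tab-separated reader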
<SYSTEM_TASK:> Make a feature mask of categorical features in X. <END_TASK> <USER_TASK:> Description: def auto_select_categorical_features(X, threshold=10): """Make a feature mask of categorical features in X. Features with no more than `threshold` (default 10) unique values are considered categorical. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) Dense array or sparse matrix. threshold : int Maximum number of unique values per feature to consider the feature to be categorical. Returns ------- feature_mask : array of booleans of size {n_features, } """
feature_mask = [] for column in range(X.shape[1]): if sparse.issparse(X): indptr_start = X.indptr[column] indptr_end = X.indptr[column + 1] unique = np.unique(X.data[indptr_start:indptr_end]) else: unique = np.unique(X[:, column]) feature_mask.append(len(unique) <= threshold) return feature_mask
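A quick check of the unique-count rule above, using a dense-only copy of the mask logic (the sparse branch is omitted and the helper name is made up):

import numpy as np

def dense_categorical_mask(X, threshold=10):
    return [len(np.unique(X[:, col])) <= threshold for col in range(X.shape[1])]

X = np.column_stack([[0, 1, 0, 1, 2], np.arange(5) * 1.5])
print(dense_categorical_mask(X, threshold=3))   # [True, False]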
<SYSTEM_TASK:> Split X into selected features and other features <END_TASK> <USER_TASK:> Description: def _X_selected(X, selected): """Split X into selected features and other features"""
n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True non_sel = np.logical_not(sel) n_selected = np.sum(sel) X_sel = X[:, ind[sel]] X_not_sel = X[:, ind[non_sel]] return X_sel, X_not_sel, n_selected, n_features
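The boolean-mask split above, demonstrated on a small dense matrix with hypothetical selected columns 0 and 2:

import numpy as np

X = np.arange(12).reshape(3, 4)
selected = [0, 2]
sel = np.zeros(X.shape[1], dtype=bool)
sel[selected] = True
X_sel, X_not_sel = X[:, sel], X[:, ~sel]
print(X_sel.tolist())       # columns 0 and 2
print(X_not_sel.tolist())   # columns 1 and 3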