Dataset columns: text_prompt (string, lengths 100 to 17.7k), code_prompt (string, lengths 7 to 9.86k).
<SYSTEM_TASK:> Generate bin edge offsets and bin labels for one array using another array <END_TASK> <USER_TASK:> Description: def generate_bins_generic(values, binner, closed): """ Generate bin edge offsets and bin labels for one array using another array which has bin edge values. Both arrays must be sorted. Parameters ---------- values : array of values binner : a comparable array of values representing bins into which to bin the first array. Note, 'values' end-points must fall within 'binner' end-points. closed : which end of bin is closed; left (default), right Returns ------- bins : array of offsets (into 'values' argument) of bins. Zero and last edge are excluded in result, so for instance the first bin is values[0:bin[0]] and the last is values[bin[-1]:] """
    lenidx = len(values)
    lenbin = len(binner)

    if lenidx <= 0 or lenbin <= 0:
        raise ValueError("Invalid length for values or for binner")

    # check binner fits data
    if values[0] < binner[0]:
        raise ValueError("Values falls before first bin")

    if values[lenidx - 1] > binner[lenbin - 1]:
        raise ValueError("Values falls after last bin")

    bins = np.empty(lenbin - 1, dtype=np.int64)

    j = 0  # index into values
    bc = 0  # bin count

    # linear scan, presume nothing about values/binner except
    # that it fits ok
    for i in range(0, lenbin - 1):
        r_bin = binner[i + 1]
        # count values in current bin, advance to next bin
        while j < lenidx and (values[j] < r_bin or
                              (closed == 'right' and values[j] == r_bin)):
            j += 1
        bins[bc] = j
        bc += 1

    return bins
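A minimal usage sketch of generate_bins_generic, assuming NumPy is available and both inputs are sorted as the docstring requires; the sample arrays are made up for illustration:

import numpy as np

values = np.array([1, 2, 3, 4, 5, 6])
binner = np.array([0, 3, 6])   # edges for two bins: (0, 3] and (3, 6]

# With closed='right', a value equal to a right edge falls into that bin.
bins = generate_bins_generic(values, binner, closed='right')
print(bins)   # [3 6]
# First bin is values[0:3] -> [1, 2, 3]; second bin is values[3:6] -> [4, 5, 6]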
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def wide_to_long(df, stubnames, i, j, sep="", suffix=r'\d+'): r""" Wide panel to long format. Less flexible but more user-friendly than melt. With stubnames ['A', 'B'], this function expects to find one or more group of columns with format A-suffix1, A-suffix2,..., B-suffix1, B-suffix2,... You specify what you want to call this suffix in the resulting long format with `j` (for example `j='year'`) Each row of these wide variables are assumed to be uniquely identified by `i` (can be a single column name or a list of column names) All remaining variables in the data frame are left intact. Parameters ---------- df : DataFrame The wide-format DataFrame stubnames : str or list-like The stub name(s). The wide format variables are assumed to start with the stub names. i : str or list-like Column(s) to use as id variable(s) j : str The name of the sub-observation variable. What you wish to name your suffix in the long format. sep : str, default "" A character indicating the separation of the variable names in the wide format, to be stripped from the names in the long format. For example, if your column names are A-suffix1, A-suffix2, you can strip the hyphen by specifying `sep='-'` .. versionadded:: 0.20.0 suffix : str, default '\\d+' A regular expression capturing the wanted suffixes. '\\d+' captures numeric suffixes. Suffixes with no numbers could be specified with the negated character class '\\D+'. You can also further disambiguate suffixes, for example, if your wide variables are of the form A-one, B-two,.., and you have an unrelated column A-rating, you can ignore the last one by specifying `suffix='(!?one|two)'` .. versionadded:: 0.20.0 .. versionchanged:: 0.23.0 When all suffixes are numeric, they are cast to int64/float64. Returns ------- DataFrame A DataFrame that contains each stub name as a variable, with new index (i, j). Notes ----- All extra variables are left untouched. This simply uses `pandas.melt` under the hood, but is hard-coded to "do the right thing" in a typical case. Examples -------- >>> np.random.seed(123) >>> df = pd.DataFrame({"A1970" : {0 : "a", 1 : "b", 2 : "c"}, ... "A1980" : {0 : "d", 1 : "e", 2 : "f"}, ... "B1970" : {0 : 2.5, 1 : 1.2, 2 : .7}, ... "B1980" : {0 : 3.2, 1 : 1.3, 2 : .1}, ... "X" : dict(zip(range(3), np.random.randn(3))) ... }) >>> df["id"] = df.index >>> df A1970 A1980 B1970 B1980 X id 0 a d 2.5 3.2 -1.085631 0 1 b e 1.2 1.3 0.997345 1 2 c f 0.7 0.1 0.282978 2 >>> pd.wide_to_long(df, ["A", "B"], i="id", j="year") ... # doctest: +NORMALIZE_WHITESPACE X A B id year 0 1970 -1.085631 a 2.5 1 1970 0.997345 b 1.2 2 1970 0.282978 c 0.7 0 1980 -1.085631 d 3.2 1 1980 0.997345 e 1.3 2 1980 0.282978 f 0.1 With multiple id columns >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht1 ht2 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age') >>> l ... 
# doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 1 2.8 2 3.4 2 1 2.9 2 3.8 3 1 2.2 2 2.9 2 1 1 2.0 2 3.2 2 1 1.8 2 2.8 3 1 1.9 2 2.4 3 1 1 2.2 2 3.3 2 1 2.3 2 3.4 3 1 2.1 2 2.9 Going from long back to wide just takes some creative use of `unstack` >>> w = l.unstack() >>> w.columns = w.columns.map('{0[0]}{0[1]}'.format) >>> w.reset_index() famid birth ht1 ht2 0 1 1 2.8 3.4 1 1 2 2.9 3.8 2 1 3 2.2 2.9 3 2 1 2.0 3.2 4 2 2 1.8 2.8 5 2 3 1.9 2.4 6 3 1 2.2 3.3 7 3 2 2.3 3.4 8 3 3 2.1 2.9 Less wieldy column names are also handled >>> np.random.seed(0) >>> df = pd.DataFrame({'A(quarterly)-2010': np.random.rand(3), ... 'A(quarterly)-2011': np.random.rand(3), ... 'B(quarterly)-2010': np.random.rand(3), ... 'B(quarterly)-2011': np.random.rand(3), ... 'X' : np.random.randint(3, size=3)}) >>> df['id'] = df.index >>> df # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS A(quarterly)-2010 A(quarterly)-2011 B(quarterly)-2010 ... 0 0.548814 0.544883 0.437587 ... 1 0.715189 0.423655 0.891773 ... 2 0.602763 0.645894 0.963663 ... X id 0 0 0 1 1 1 2 1 2 >>> pd.wide_to_long(df, ['A(quarterly)', 'B(quarterly)'], i='id', ... j='year', sep='-') ... # doctest: +NORMALIZE_WHITESPACE X A(quarterly) B(quarterly) id year 0 2010 0 0.548814 0.437587 1 2010 1 0.715189 0.891773 2 2010 1 0.602763 0.963663 0 2011 0 0.544883 0.383442 1 2011 1 0.423655 0.791725 2 2011 1 0.645894 0.528895 If we have many columns, we could also use a regex to find our stubnames and pass that list on to wide_to_long >>> stubnames = sorted( ... set([match[0] for match in df.columns.str.findall( ... r'[A-B]\(.*\)').values if match != [] ]) ... ) >>> list(stubnames) ['A(quarterly)', 'B(quarterly)'] All of the above examples have integers as suffixes. It is possible to have non-integers as suffixes. >>> df = pd.DataFrame({ ... 'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3], ... 'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3], ... 'ht_one': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], ... 'ht_two': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9] ... }) >>> df birth famid ht_one ht_two 0 1 1 2.8 3.4 1 2 1 2.9 3.8 2 3 1 2.2 2.9 3 1 2 2.0 3.2 4 2 2 1.8 2.8 5 3 2 1.9 2.4 6 1 3 2.2 3.3 7 2 3 2.3 3.4 8 3 3 2.1 2.9 >>> l = pd.wide_to_long(df, stubnames='ht', i=['famid', 'birth'], j='age', sep='_', suffix='\w') >>> l ... # doctest: +NORMALIZE_WHITESPACE ht famid birth age 1 1 one 2.8 two 3.4 2 one 2.9 two 3.8 3 one 2.2 two 2.9 2 1 one 2.0 two 3.2 2 one 1.8 two 2.8 3 one 1.9 two 2.4 3 1 one 2.2 two 3.3 2 one 2.3 two 3.4 3 one 2.1 two 2.9 """
def get_var_names(df, stub, sep, suffix): regex = r'^{stub}{sep}{suffix}$'.format( stub=re.escape(stub), sep=re.escape(sep), suffix=suffix) pattern = re.compile(regex) return [col for col in df.columns if pattern.match(col)] def melt_stub(df, stub, i, j, value_vars, sep): newdf = melt(df, id_vars=i, value_vars=value_vars, value_name=stub.rstrip(sep), var_name=j) newdf[j] = Categorical(newdf[j]) newdf[j] = newdf[j].str.replace(re.escape(stub + sep), "") # GH17627 Cast numerics suffixes to int/float newdf[j] = to_numeric(newdf[j], errors='ignore') return newdf.set_index(i + [j]) if not is_list_like(stubnames): stubnames = [stubnames] else: stubnames = list(stubnames) if any(col in stubnames for col in df.columns): raise ValueError("stubname can't be identical to a column name") if not is_list_like(i): i = [i] else: i = list(i) if df[i].duplicated().any(): raise ValueError("the id variables need to uniquely identify each row") value_vars = [get_var_names(df, stub, sep, suffix) for stub in stubnames] value_vars_flattened = [e for sublist in value_vars for e in sublist] id_vars = list(set(df.columns.tolist()).difference(value_vars_flattened)) melted = [melt_stub(df, s, i, j, v, sep) for s, v in zip(stubnames, value_vars)] melted = melted[0].join(melted[1:], how='outer') if len(i) == 1: new = df[id_vars].set_index(i).join(melted) return new new = df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j]) return new
<SYSTEM_TASK:> Safe get multiple indices, translate keys for <END_TASK> <USER_TASK:> Description: def _get_indices(self, names): """ Safe get multiple indices, translate keys for datelike to underlying repr. """
def get_converter(s): # possibly convert to the actual key types # in the indices, could be a Timestamp or a np.datetime64 if isinstance(s, (Timestamp, datetime.datetime)): return lambda key: Timestamp(key) elif isinstance(s, np.datetime64): return lambda key: Timestamp(key).asm8 else: return lambda key: key if len(names) == 0: return [] if len(self.indices) > 0: index_sample = next(iter(self.indices)) else: index_sample = None # Dummy sample name_sample = names[0] if isinstance(index_sample, tuple): if not isinstance(name_sample, tuple): msg = ("must supply a tuple to get_group with multiple" " grouping keys") raise ValueError(msg) if not len(name_sample) == len(index_sample): try: # If the original grouper was a tuple return [self.indices[name] for name in names] except KeyError: # turns out it wasn't a tuple msg = ("must supply a same-length tuple to get_group" " with multiple grouping keys") raise ValueError(msg) converters = [get_converter(s) for s in index_sample] names = (tuple(f(n) for f, n in zip(converters, name)) for name in names) else: converter = get_converter(index_sample) names = (converter(name) for name in names) return [self.indices.get(name, []) for name in names]
<SYSTEM_TASK:> Create group based selection. <END_TASK> <USER_TASK:> Description: def _set_group_selection(self): """ Create group based selection. Used when selection is not passed directly but instead via a grouper. NOTE: this should be paired with a call to _reset_group_selection """
    grp = self.grouper
    if not (self.as_index and
            getattr(grp, 'groupings', None) is not None and
            self.obj.ndim > 1 and
            self._group_selection is None):
        return

    ax = self.obj._info_axis
    groupers = [g.name for g in grp.groupings
                if g.level is None and g.in_axis]

    if len(groupers):
        # GH12839 clear selected obj cache when group selection changes
        self._group_selection = ax.difference(Index(groupers),
                                              sort=False).tolist()
        self._reset_cache('_selected_obj')
<SYSTEM_TASK:> Construct NDFrame from group with provided name. <END_TASK> <USER_TASK:> Description: def get_group(self, name, obj=None): """ Construct NDFrame from group with provided name. Parameters ---------- name : object the name of the group to get as a DataFrame obj : NDFrame, default None the NDFrame to take the DataFrame out of. If it is None, the object groupby was called on will be used Returns ------- group : same type as obj """
    if obj is None:
        obj = self._selected_obj

    inds = self._get_index(name)
    if not len(inds):
        raise KeyError(name)

    return obj._take(inds, axis=self.axis)
<SYSTEM_TASK:> Try to cast the result to our obj original type, <END_TASK> <USER_TASK:> Description: def _try_cast(self, result, obj, numeric_only=False): """ Try to cast the result to our obj original type, we may have roundtripped through object in the mean-time. If numeric_only is True, then only try to cast numerics and not datetimelikes. """
if obj.ndim > 1: dtype = obj._values.dtype else: dtype = obj.dtype if not is_scalar(result): if is_datetime64tz_dtype(dtype): # GH 23683 # Prior results _may_ have been generated in UTC. # Ensure we localize to UTC first before converting # to the target timezone try: result = obj._values._from_sequence( result, dtype='datetime64[ns, UTC]' ) result = result.astype(dtype) except TypeError: # _try_cast was called at a point where the result # was already tz-aware pass elif is_extension_array_dtype(dtype): # The function can return something of any type, so check # if the type is compatible with the calling EA. try: result = obj._values._from_sequence(result, dtype=dtype) except Exception: # https://github.com/pandas-dev/pandas/issues/22850 # pandas has no control over what 3rd-party ExtensionArrays # do in _values_from_sequence. We still want ops to work # though, so we catch any regular Exception. pass elif numeric_only and is_numeric_dtype(dtype) or not numeric_only: result = maybe_downcast_to_dtype(result, dtype) return result
<SYSTEM_TASK:> Compute standard error of the mean of groups, excluding missing values. <END_TASK> <USER_TASK:> Description: def sem(self, ddof=1): """ Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : integer, default 1 degrees of freedom """
return self.std(ddof=ddof) / np.sqrt(self.count())
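Since the docstring gives no example, here is a small hedged sketch showing that the per-group standard error equals std / sqrt(count); the sample data is made up:

import numpy as np
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b", "b", "b"],
                   "val": [1.0, 3.0, 2.0, 4.0, 6.0]})
g = df.groupby("key")["val"]

print(g.sem())                              # standard error per group
print(g.std(ddof=1) / np.sqrt(g.count()))   # identical values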
<SYSTEM_TASK:> Add numeric operations to the GroupBy generically. <END_TASK> <USER_TASK:> Description: def _add_numeric_operations(cls): """ Add numeric operations to the GroupBy generically. """
def groupby_function(name, alias, npfunc, numeric_only=True, _convert=False, min_count=-1): _local_template = "Compute %(f)s of group values" @Substitution(name='groupby', f=name) @Appender(_common_see_also) @Appender(_local_template) def f(self, **kwargs): if 'numeric_only' not in kwargs: kwargs['numeric_only'] = numeric_only if 'min_count' not in kwargs: kwargs['min_count'] = min_count self._set_group_selection() try: return self._cython_agg_general( alias, alt=npfunc, **kwargs) except AssertionError as e: raise SpecificationError(str(e)) except Exception: result = self.aggregate( lambda x: npfunc(x, axis=self.axis)) if _convert: result = result._convert(datetime=True) return result set_function_name(f, name, cls) return f def first_compat(x, axis=0): def first(x): x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan return x[0] if isinstance(x, DataFrame): return x.apply(first, axis=axis) else: return first(x) def last_compat(x, axis=0): def last(x): x = x.to_numpy() x = x[notna(x)] if len(x) == 0: return np.nan return x[-1] if isinstance(x, DataFrame): return x.apply(last, axis=axis) else: return last(x) cls.sum = groupby_function('sum', 'add', np.sum, min_count=0) cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, numeric_only=False) cls.last = groupby_function('last', 'last', last_compat, numeric_only=False)
<SYSTEM_TASK:> Provide resampling when using a TimeGrouper. <END_TASK> <USER_TASK:> Description: def resample(self, rule, *args, **kwargs): """ Provide resampling when using a TimeGrouper. Given a grouper, the function resamples it according to a string "string" -> "frequency". See the :ref:`frequency aliases <timeseries.offset_aliases>` documentation for more details. Parameters ---------- rule : str or DateOffset The offset string or object representing target grouper conversion. *args, **kwargs Possible arguments are `how`, `fill_method`, `limit`, `kind` and `on`, and other arguments of `TimeGrouper`. Returns ------- Grouper Return a new grouper with our resampler appended. See Also -------- Grouper : Specify a frequency to resample with when grouping by a key. DatetimeIndex.resample : Frequency conversion and resampling of time series. Examples -------- >>> idx = pd.date_range('1/1/2000', periods=4, freq='T') >>> df = pd.DataFrame(data=4 * [range(2)], ... index=idx, ... columns=['a', 'b']) >>> df.iloc[2, 0] = 5 >>> df a b 2000-01-01 00:00:00 0 1 2000-01-01 00:01:00 0 1 2000-01-01 00:02:00 5 1 2000-01-01 00:03:00 0 1 Downsample the DataFrame into 3 minute bins and sum the values of the timestamps falling into a bin. >>> df.groupby('a').resample('3T').sum() a b a 0 2000-01-01 00:00:00 0 2 2000-01-01 00:03:00 0 1 5 2000-01-01 00:00:00 5 1 Upsample the series into 30 second bins. >>> df.groupby('a').resample('30S').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:00:30 0 0 2000-01-01 00:01:00 0 1 2000-01-01 00:01:30 0 0 2000-01-01 00:02:00 0 0 2000-01-01 00:02:30 0 0 2000-01-01 00:03:00 0 1 5 2000-01-01 00:02:00 5 1 Resample by month. Values are assigned to the month of the period. >>> df.groupby('a').resample('M').sum() a b a 0 2000-01-31 0 3 5 2000-01-31 5 1 Downsample the series into 3 minute bins as above, but close the right side of the bin interval. >>> df.groupby('a').resample('3T', closed='right').sum() a b a 0 1999-12-31 23:57:00 0 1 2000-01-01 00:00:00 0 2 5 2000-01-01 00:00:00 5 1 Downsample the series into 3 minute bins and close the right side of the bin interval, but label each bin using the right edge instead of the left. >>> df.groupby('a').resample('3T', closed='right', label='right').sum() a b a 0 2000-01-01 00:00:00 0 1 2000-01-01 00:03:00 0 2 5 2000-01-01 00:03:00 5 1 Add an offset of twenty seconds. >>> df.groupby('a').resample('3T', loffset='20s').sum() a b a 0 2000-01-01 00:00:20 0 2 2000-01-01 00:03:20 0 1 5 2000-01-01 00:00:20 5 1 """
    from pandas.core.resample import get_resampler_for_grouping
    return get_resampler_for_grouping(self, rule, *args, **kwargs)
<SYSTEM_TASK:> Return an expanding grouper, providing expanding <END_TASK> <USER_TASK:> Description: def expanding(self, *args, **kwargs): """ Return an expanding grouper, providing expanding functionality per group. """
    from pandas.core.window import ExpandingGroupby
    return ExpandingGroupby(self, *args, **kwargs)
<SYSTEM_TASK:> Shared function for `pad` and `backfill` to call Cython method. <END_TASK> <USER_TASK:> Description: def _fill(self, direction, limit=None): """ Shared function for `pad` and `backfill` to call Cython method. Parameters ---------- direction : {'ffill', 'bfill'} Direction passed to underlying Cython function. `bfill` will cause values to be filled backwards. `ffill` and any other values will default to a forward fill limit : int, default None Maximum number of consecutive values to fill. If `None`, this method will convert to -1 prior to passing to Cython Returns ------- `Series` or `DataFrame` with filled values See Also -------- pad backfill """
    # Need int value for Cython
    if limit is None:
        limit = -1

    return self._get_cythonized_result('group_fillna_indexer',
                                       self.grouper, needs_mask=True,
                                       cython_dtype=np.int64,
                                       result_is_index=True,
                                       direction=direction, limit=limit)
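_fill backs the public ffill/bfill groupby methods; a small hedged sketch of the user-facing behaviour (sample data is made up):

import numpy as np
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "a", "b", "b"],
                   "val": [1.0, np.nan, np.nan, np.nan, 5.0]})

# Forward-fill within each group; fills never cross group boundaries.
print(df.groupby("key")["val"].ffill(limit=1).tolist())
# [1.0, 1.0, nan, nan, 5.0]
# position 2 stays NaN because limit=1 stops the second consecutive fill,
# and group 'b' has no prior value to propagate into position 3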
<SYSTEM_TASK:> Return group values at the given quantile, a la numpy.percentile. <END_TASK> <USER_TASK:> Description: def quantile(self, q=0.5, interpolation='linear'): """ Return group values at the given quantile, a la numpy.percentile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Value(s) between 0 and 1 providing the quantile(s) to compute. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} Method to use when the desired quantile falls between two points. Returns ------- Series or DataFrame Return type determined by caller of GroupBy object. See Also -------- Series.quantile : Similar method for Series. DataFrame.quantile : Similar method for DataFrame. numpy.percentile : NumPy method to compute qth percentile. Examples -------- >>> df = pd.DataFrame([ ... ['a', 1], ['a', 2], ['a', 3], ... ['b', 1], ['b', 3], ['b', 5] ... ], columns=['key', 'val']) >>> df.groupby('key').quantile() val key a 2.0 b 3.0 """
def pre_processor( vals: np.ndarray ) -> Tuple[np.ndarray, Optional[Type]]: if is_object_dtype(vals): raise TypeError("'quantile' cannot be performed against " "'object' dtypes!") inference = None if is_integer_dtype(vals): inference = np.int64 elif is_datetime64_dtype(vals): inference = 'datetime64[ns]' vals = vals.astype(np.float) return vals, inference def post_processor( vals: np.ndarray, inference: Optional[Type] ) -> np.ndarray: if inference: # Check for edge case if not (is_integer_dtype(inference) and interpolation in {'linear', 'midpoint'}): vals = vals.astype(inference) return vals return self._get_cythonized_result('group_quantile', self.grouper, aggregate=True, needs_values=True, needs_mask=True, cython_dtype=np.float64, pre_processing=pre_processor, post_processing=post_processor, q=q, interpolation=interpolation)
<SYSTEM_TASK:> Number each group from 0 to the number of groups - 1. <END_TASK> <USER_TASK:> Description: def ngroup(self, ascending=True): """ Number each group from 0 to the number of groups - 1. This is the enumerative complement of cumcount. Note that the numbers given to the groups match the order in which the groups would be seen when iterating over the groupby object, not the order they are first observed. .. versionadded:: 0.20.2 Parameters ---------- ascending : bool, default True If False, number in reverse, from number of group - 1 to 0. See Also -------- .cumcount : Number the rows in each group. Examples -------- >>> df = pd.DataFrame({"A": list("aaabba")}) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').ngroup() 0 0 1 0 2 0 3 1 4 1 5 0 dtype: int64 >>> df.groupby('A').ngroup(ascending=False) 0 1 1 1 2 1 3 0 4 0 5 1 dtype: int64 >>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup() 0 0 1 0 2 1 3 3 4 2 5 0 dtype: int64 """
    with _group_selection_context(self):
        index = self._selected_obj.index
        result = Series(self.grouper.group_info[0], index)
        if not ascending:
            result = self.ngroups - 1 - result
        return result
<SYSTEM_TASK:> Number each item in each group from 0 to the length of that group - 1. <END_TASK> <USER_TASK:> Description: def cumcount(self, ascending=True): """ Number each item in each group from 0 to the length of that group - 1. Essentially this is equivalent to >>> self.apply(lambda x: pd.Series(np.arange(len(x)), x.index)) Parameters ---------- ascending : bool, default True If False, number in reverse, from length of group - 1 to 0. See Also -------- .ngroup : Number the groups themselves. Examples -------- >>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']], ... columns=['A']) >>> df A 0 a 1 a 2 a 3 b 4 b 5 a >>> df.groupby('A').cumcount() 0 0 1 1 2 2 3 0 4 1 5 3 dtype: int64 >>> df.groupby('A').cumcount(ascending=False) 0 3 1 2 2 1 3 1 4 0 5 0 dtype: int64 """
    with _group_selection_context(self):
        index = self._selected_obj.index
        cumcounts = self._cumcount_array(ascending=ascending)
        return Series(cumcounts, index)
<SYSTEM_TASK:> Provide the rank of values within each group. <END_TASK> <USER_TASK:> Description: def rank(self, method='average', ascending=True, na_option='keep', pct=False, axis=0): """ Provide the rank of values within each group. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) na_option : {'keep', 'top', 'bottom'}, default 'keep' * keep: leave NA values where they are * top: smallest rank if ascending * bottom: smallest rank if descending pct : boolean, default False Compute percentage rank of data within each group axis : int, default 0 The axis of the object over which to compute the rank. Returns ------- DataFrame with ranking of values within each group """
    if na_option not in {'keep', 'top', 'bottom'}:
        msg = "na_option must be one of 'keep', 'top', or 'bottom'"
        raise ValueError(msg)

    return self._cython_transform('rank', numeric_only=False,
                                  ties_method=method, ascending=ascending,
                                  na_option=na_option, pct=pct, axis=axis)
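The rank docstring has no example, so a short hedged one (sample data made up):

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "a", "b", "b"],
                   "val": [30, 10, 20, 5, 5]})

# Ranks are computed within each group, not across the whole frame.
print(df.groupby("key")["val"].rank(method="min").tolist())
# [3.0, 1.0, 2.0, 1.0, 1.0]   <- the tie in group 'b' shares the lowest rank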
<SYSTEM_TASK:> Cumulative product for each group. <END_TASK> <USER_TASK:> Description: def cumprod(self, axis=0, *args, **kwargs): """ Cumulative product for each group. """
    nv.validate_groupby_func('cumprod', args, kwargs,
                             ['numeric_only', 'skipna'])
    if axis != 0:
        return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))

    return self._cython_transform('cumprod', **kwargs)
<SYSTEM_TASK:> Cumulative min for each group. <END_TASK> <USER_TASK:> Description: def cummin(self, axis=0, **kwargs): """ Cumulative min for each group. """
    if axis != 0:
        return self.apply(lambda x: np.minimum.accumulate(x, axis))

    return self._cython_transform('cummin', numeric_only=False)
<SYSTEM_TASK:> Cumulative max for each group. <END_TASK> <USER_TASK:> Description: def cummax(self, axis=0, **kwargs): """ Cumulative max for each group. """
    if axis != 0:
        return self.apply(lambda x: np.maximum.accumulate(x, axis))

    return self._cython_transform('cummax', numeric_only=False)
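A combined hedged sketch of the three cumulative group operations above (sample data made up):

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "a", "b", "b"],
                   "val": [2, 3, 1, 4, 2]})
g = df.groupby("key")["val"]

# Each result restarts at every group boundary.
print(g.cumprod().tolist())   # [2, 6, 6, 4, 8]
print(g.cummin().tolist())    # [2, 2, 1, 4, 2]
print(g.cummax().tolist())    # [2, 3, 3, 4, 4]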
<SYSTEM_TASK:> Get result for Cythonized functions. <END_TASK> <USER_TASK:> Description: def _get_cythonized_result(self, how, grouper, aggregate=False, cython_dtype=None, needs_values=False, needs_mask=False, needs_ngroups=False, result_is_index=False, pre_processing=None, post_processing=None, **kwargs): """ Get result for Cythonized functions. Parameters ---------- how : str, Cythonized function name to be called grouper : Grouper object containing pertinent group info aggregate : bool, default False Whether the result should be aggregated to match the number of groups cython_dtype : default None Type of the array that will be modified by the Cython call. If `None`, the type will be inferred from the values of each slice needs_values : bool, default False Whether the values should be a part of the Cython call signature needs_mask : bool, default False Whether boolean mask needs to be part of the Cython call signature needs_ngroups : bool, default False Whether number of groups is part of the Cython call signature result_is_index : bool, default False Whether the result of the Cython operation is an index of values to be retrieved, instead of the actual values themselves pre_processing : function, default None Function to be applied to `values` prior to passing to Cython. Function should return a tuple where the first element is the values to be passed to Cython and the second element is an optional type which the values should be converted to after being returned by the Cython operation. Raises if `needs_values` is False. post_processing : function, default None Function to be applied to result of Cython function. Should accept an array of values as the first argument and type inferences as its second argument, i.e. the signature should be (ndarray, Type). **kwargs : dict Extra arguments to be passed back to Cython funcs Returns ------- `Series` or `DataFrame` with filled values """
    if result_is_index and aggregate:
        raise ValueError("'result_is_index' and 'aggregate' cannot both "
                         "be True!")
    if post_processing:
        if not callable(post_processing):
            raise ValueError("'post_processing' must be a callable!")
    if pre_processing:
        if not callable(pre_processing):
            raise ValueError("'pre_processing' must be a callable!")
        if not needs_values:
            raise ValueError("Cannot use 'pre_processing' without "
                             "specifying 'needs_values'!")

    labels, _, ngroups = grouper.group_info
    output = collections.OrderedDict()
    base_func = getattr(libgroupby, how)

    for name, obj in self._iterate_slices():
        if aggregate:
            result_sz = ngroups
        else:
            result_sz = len(obj.values)

        if not cython_dtype:
            cython_dtype = obj.values.dtype

        result = np.zeros(result_sz, dtype=cython_dtype)
        func = partial(base_func, result, labels)
        inferences = None

        if needs_values:
            vals = obj.values
            if pre_processing:
                vals, inferences = pre_processing(vals)
            func = partial(func, vals)

        if needs_mask:
            mask = isna(obj.values).view(np.uint8)
            func = partial(func, mask)

        if needs_ngroups:
            func = partial(func, ngroups)

        func(**kwargs)  # Call func to modify indexer values in place

        if result_is_index:
            result = algorithms.take_nd(obj.values, result)

        if post_processing:
            result = post_processing(result, inferences)

        output[name] = result

    if aggregate:
        return self._wrap_aggregated_output(output)
    else:
        return self._wrap_transformed_output(output)
<SYSTEM_TASK:> Shift each group by periods observations. <END_TASK> <USER_TASK:> Description: def shift(self, periods=1, freq=None, axis=0, fill_value=None): """ Shift each group by periods observations. Parameters ---------- periods : integer, default 1 number of periods to shift freq : frequency string axis : axis to shift, default 0 fill_value : optional .. versionadded:: 0.24.0 """
    if freq is not None or axis != 0 or not isna(fill_value):
        return self.apply(lambda x: x.shift(periods, freq, axis, fill_value))

    return self._get_cythonized_result('group_shift_indexer',
                                       self.grouper, cython_dtype=np.int64,
                                       needs_ngroups=True,
                                       result_is_index=True,
                                       periods=periods)
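A short hedged sketch of the user-facing groupby shift behaviour (sample data made up):

import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "a", "b", "b"],
                   "val": [1, 2, 3, 4, 5]})

# Each group is shifted independently; the first row of every group
# becomes NaN (or `fill_value`, if one is given).
print(df.groupby("key")["val"].shift(1).tolist())
# [nan, 1.0, 2.0, nan, 4.0]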
<SYSTEM_TASK:> If holiday falls on Saturday, use following Monday instead; <END_TASK> <USER_TASK:> Description: def next_monday(dt): """ If holiday falls on Saturday, use following Monday instead; if holiday falls on Sunday, use Monday instead """
    if dt.weekday() == 5:
        return dt + timedelta(2)
    elif dt.weekday() == 6:
        return dt + timedelta(1)
    return dt
<SYSTEM_TASK:> If holiday falls on Saturday or Sunday, use previous Friday instead. <END_TASK> <USER_TASK:> Description: def previous_friday(dt): """ If holiday falls on Saturday or Sunday, use previous Friday instead. """
    if dt.weekday() == 5:
        return dt - timedelta(1)
    elif dt.weekday() == 6:
        return dt - timedelta(2)
    return dt
<SYSTEM_TASK:> returns next weekday used for observances <END_TASK> <USER_TASK:> Description: def next_workday(dt): """ returns next weekday used for observances """
    dt += timedelta(days=1)
    while dt.weekday() > 4:
        # Mon-Fri are 0-4
        dt += timedelta(days=1)
    return dt
<SYSTEM_TASK:> returns previous weekday used for observances <END_TASK> <USER_TASK:> Description: def previous_workday(dt): """ returns previous weekday used for observances """
    dt -= timedelta(days=1)
    while dt.weekday() > 4:
        # Mon-Fri are 0-4
        dt -= timedelta(days=1)
    return dt
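A quick check of the four observance helpers defined above; the date is chosen so that 2019-07-06 is a Saturday:

from datetime import datetime

sat = datetime(2019, 7, 6)          # a Saturday

print(next_monday(sat))             # 2019-07-08 (following Monday)
print(previous_friday(sat))         # 2019-07-05 (previous Friday)
print(next_workday(sat))            # 2019-07-08 (next weekday)
print(previous_workday(sat))        # 2019-07-05 (previous weekday)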
<SYSTEM_TASK:> Calculate holidays observed between start date and end date <END_TASK> <USER_TASK:> Description: def dates(self, start_date, end_date, return_name=False): """ Calculate holidays observed between start date and end date Parameters ---------- start_date : starting date, datetime-like, optional end_date : ending date, datetime-like, optional return_name : bool, optional, default=False If True, return a series that has dates and holiday names. False will only return dates. """
start_date = Timestamp(start_date) end_date = Timestamp(end_date) filter_start_date = start_date filter_end_date = end_date if self.year is not None: dt = Timestamp(datetime(self.year, self.month, self.day)) if return_name: return Series(self.name, index=[dt]) else: return [dt] dates = self._reference_dates(start_date, end_date) holiday_dates = self._apply_rule(dates) if self.days_of_week is not None: holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek, self.days_of_week)] if self.start_date is not None: filter_start_date = max(self.start_date.tz_localize( filter_start_date.tz), filter_start_date) if self.end_date is not None: filter_end_date = min(self.end_date.tz_localize( filter_end_date.tz), filter_end_date) holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)] if return_name: return Series(self.name, index=holiday_dates) return holiday_dates
<SYSTEM_TASK:> Get reference dates for the holiday. <END_TASK> <USER_TASK:> Description: def _reference_dates(self, start_date, end_date): """ Get reference dates for the holiday. Return reference dates for the holiday also returning the year prior to the start_date and year following the end_date. This ensures that any offsets to be applied will yield the holidays within the passed in dates. """
    if self.start_date is not None:
        start_date = self.start_date.tz_localize(start_date.tz)

    if self.end_date is not None:
        end_date = self.end_date.tz_localize(start_date.tz)

    year_offset = DateOffset(years=1)
    reference_start_date = Timestamp(
        datetime(start_date.year - 1, self.month, self.day))

    reference_end_date = Timestamp(
        datetime(end_date.year + 1, self.month, self.day))
    # Don't process unnecessary holidays
    dates = date_range(start=reference_start_date,
                       end=reference_end_date,
                       freq=year_offset, tz=start_date.tz)

    return dates
<SYSTEM_TASK:> Returns a curve with holidays between start_date and end_date <END_TASK> <USER_TASK:> Description: def holidays(self, start=None, end=None, return_name=False): """ Returns a curve with holidays between start_date and end_date Parameters ---------- start : starting date, datetime-like, optional end : ending date, datetime-like, optional return_name : bool, optional If True, return a series that has dates and holiday names. False will only return a DatetimeIndex of dates. Returns ------- DatetimeIndex of holidays """
if self.rules is None: raise Exception('Holiday Calendar {name} does not have any ' 'rules specified'.format(name=self.name)) if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) holidays = None # If we don't have a cache or the dates are outside the prior cache, we # get them again if (self._cache is None or start < self._cache[0] or end > self._cache[1]): for rule in self.rules: rule_holidays = rule.dates(start, end, return_name=True) if holidays is None: holidays = rule_holidays else: holidays = holidays.append(rule_holidays) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index
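A hedged usage sketch of the holidays method with the bundled US federal calendar (USFederalHolidayCalendar from pandas.tseries.holiday; exact rule names may vary by pandas version):

from pandas.tseries.holiday import USFederalHolidayCalendar

cal = USFederalHolidayCalendar()

# DatetimeIndex of observed holidays inside the window
print(cal.holidays(start="2019-01-01", end="2019-12-31"))

# With return_name=True a Series mapping dates to holiday names is returned,
# e.g. 2019-07-04 -> the Independence Day rule (name depends on the rules).
print(cal.holidays(start="2019-07-01", end="2019-07-31", return_name=True))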
<SYSTEM_TASK:> Merge holiday calendars together. The base calendar <END_TASK> <USER_TASK:> Description: def merge_class(base, other): """ Merge holiday calendars together. The base calendar will take precedence to other. The merge will be done based on each holiday's name. Parameters ---------- base : AbstractHolidayCalendar instance/subclass or array of Holiday objects other : AbstractHolidayCalendar instance/subclass or array of Holiday objects """
    try:
        other = other.rules
    except AttributeError:
        pass

    if not isinstance(other, list):
        other = [other]
    other_holidays = {holiday.name: holiday for holiday in other}

    try:
        base = base.rules
    except AttributeError:
        pass

    if not isinstance(base, list):
        base = [base]
    base_holidays = {holiday.name: holiday for holiday in base}

    other_holidays.update(base_holidays)
    return list(other_holidays.values())
<SYSTEM_TASK:> Merge holiday calendars together. The caller's class <END_TASK> <USER_TASK:> Description: def merge(self, other, inplace=False): """ Merge holiday calendars together. The caller's class rules take precedence. The merge will be done based on each holiday's name. Parameters ---------- other : holiday calendar inplace : bool (default=False) If True set rule_table to holidays, else return array of Holidays """
    holidays = self.merge_class(self, other)
    if inplace:
        self.rules = holidays
    else:
        return holidays
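A hedged sketch of merging two small calendars; the calendar classes below are made up for illustration, while Holiday and AbstractHolidayCalendar come from pandas.tseries.holiday:

from pandas.tseries.holiday import AbstractHolidayCalendar, Holiday

class CalA(AbstractHolidayCalendar):
    rules = [Holiday("NewYear", month=1, day=1),
             Holiday("CompanyDay", month=3, day=1)]

class CalB(AbstractHolidayCalendar):
    rules = [Holiday("NewYear", month=1, day=2),      # same name as in CalA
             Holiday("Foundation", month=9, day=1)]

# Rules are de-duplicated by holiday name and the base calendar wins,
# so the merged "NewYear" keeps month=1, day=1 from CalA.
merged = AbstractHolidayCalendar.merge_class(CalA, CalB)
print(sorted(h.name for h in merged))
# ['CompanyDay', 'Foundation', 'NewYear']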
<SYSTEM_TASK:> Register an option in the package-wide pandas config object <END_TASK> <USER_TASK:> Description: def register_option(key, defval, doc='', validator=None, cb=None): """Register an option in the package-wide pandas config object Parameters ---------- key - a fully-qualified key, e.g. "x.y.option - z". defval - the default value of the option doc - a string description of the option validator - a function of a single argument, should raise `ValueError` if called with a value which is not a legal value for the option. cb - a function of a single argument "key", which is called immediately after an option value is set/reset. key is the full name of the option. Returns ------- Nothing. Raises ------ ValueError if `validator` is specified and `defval` is not a valid value. """
import tokenize import keyword key = key.lower() if key in _registered_options: msg = "Option '{key}' has already been registered" raise OptionError(msg.format(key=key)) if key in _reserved_keys: msg = "Option '{key}' is a reserved key" raise OptionError(msg.format(key=key)) # the default value should be legal if validator: validator(defval) # walk the nested dict, creating dicts as needed along the path path = key.split('.') for k in path: if not bool(re.match('^' + tokenize.Name + '$', k)): raise ValueError("{k} is not a valid identifier".format(k=k)) if keyword.iskeyword(k): raise ValueError("{k} is a python keyword".format(k=k)) cursor = _global_config msg = "Path prefix to option '{option}' is already an option" for i, p in enumerate(path[:-1]): if not isinstance(cursor, dict): raise OptionError(msg.format(option='.'.join(path[:i]))) if p not in cursor: cursor[p] = {} cursor = cursor[p] if not isinstance(cursor, dict): raise OptionError(msg.format(option='.'.join(path[:-1]))) cursor[path[-1]] = defval # initialize # save the option metadata _registered_options[key] = RegisteredOption(key=key, defval=defval, doc=doc, validator=validator, cb=cb)
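A hedged usage sketch of register_option; the option key and validator below are invented for illustration, and the import path follows the config_prefix docstring further down:

import pandas._config.config as cf

def validate_positive_int(value):
    # validator: raise ValueError for anything that is not a positive int
    if not isinstance(value, int) or value <= 0:
        raise ValueError("value must be a positive integer")

cf.register_option("demo.max_widgets", 10,
                   doc="maximum number of widgets to render (demo option)",
                   validator=validate_positive_int)

print(cf.get_option("demo.max_widgets"))   # 10
cf.set_option("demo.max_widgets", 25)      # passes the validator
# cf.set_option("demo.max_widgets", -1)    # would raise ValueError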
<SYSTEM_TASK:> Mark option `key` as deprecated, if code attempts to access this option, <END_TASK> <USER_TASK:> Description: def deprecate_option(key, msg=None, rkey=None, removal_ver=None): """ Mark option `key` as deprecated, if code attempts to access this option, a warning will be produced, using `msg` if given, or a default message if not. if `rkey` is given, any access to the key will be re-routed to `rkey`. Neither the existence of `key` nor that if `rkey` is checked. If they do not exist, any subsequence access will fail as usual, after the deprecation warning is given. Parameters ---------- key - the name of the option to be deprecated. must be a fully-qualified option name (e.g "x.y.z.rkey"). msg - (Optional) a warning message to output when the key is referenced. if no message is given a default message will be emitted. rkey - (Optional) the name of an option to reroute access to. If specified, any referenced `key` will be re-routed to `rkey` including set/get/reset. rkey must be a fully-qualified option name (e.g "x.y.z.rkey"). used by the default message if no `msg` is specified. removal_ver - (Optional) specifies the version in which this option will be removed. used by the default message if no `msg` is specified. Returns ------- Nothing Raises ------ OptionError - if key has already been deprecated. """
    key = key.lower()

    if key in _deprecated_options:
        msg = "Option '{key}' has already been defined as deprecated."
        raise OptionError(msg.format(key=key))

    _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
<SYSTEM_TASK:> returns a list of keys matching `pat` <END_TASK> <USER_TASK:> Description: def _select_options(pat): """returns a list of keys matching `pat` if pat=="all", returns all registered options """
    # short-circuit for exact key
    if pat in _registered_options:
        return [pat]

    # else look through all of them
    keys = sorted(_registered_options.keys())
    if pat == 'all':  # reserved key
        return keys

    return [k for k in keys if re.search(pat, k, re.I)]
<SYSTEM_TASK:> if key id deprecated and a replacement key defined, will return the <END_TASK> <USER_TASK:> Description: def _translate_key(key): """ if key id deprecated and a replacement key defined, will return the replacement key, otherwise returns `key` as - is """
    d = _get_deprecated_option(key)
    if d:
        return d.rkey or key
    else:
        return key
<SYSTEM_TASK:> Builds a formatted description of a registered option and prints it <END_TASK> <USER_TASK:> Description: def _build_option_description(k): """ Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k) d = _get_deprecated_option(k) s = '{k} '.format(k=k) if o.doc: s += '\n'.join(o.doc.strip().split('\n')) else: s += 'No description available.' if o: s += ('\n [default: {default}] [currently: {current}]' .format(default=o.defval, current=_get_option(k, True))) if d: s += '\n (Deprecated' s += (', use `{rkey}` instead.' .format(rkey=d.rkey if d.rkey else '')) s += ')' return s
<SYSTEM_TASK:> contextmanager for multiple invocations of API with a common prefix <END_TASK> <USER_TASK:> Description: def config_prefix(prefix): """contextmanager for multiple invocations of API with a common prefix supported API functions: (register / get / set )__option Warning: This is not thread - safe, and won't work properly if you import the API functions into your module using the "from x import y" construct. Example: import pandas._config.config as cf with cf.config_prefix("display.font"): cf.register_option("color", "red") cf.register_option("size", " 5 pt") cf.set_option(size, " 6 pt") cf.get_option(size) ... etc' will register options "display.font.color", "display.font.size", set the value of "display.font.size"... and so on. """
# Note: reset_option relies on set_option, and on key directly # it does not fit in to this monkey-patching scheme global register_option, get_option, set_option, reset_option def wrap(func): def inner(key, *args, **kwds): pkey = '{prefix}.{key}'.format(prefix=prefix, key=key) return func(pkey, *args, **kwds) return inner _register_option = register_option _get_option = get_option _set_option = set_option set_option = wrap(set_option) get_option = wrap(get_option) register_option = wrap(register_option) yield None set_option = _set_option get_option = _get_option register_option = _register_option
<SYSTEM_TASK:> Try to do platform conversion, with special casing for IntervalArray. <END_TASK> <USER_TASK:> Description: def maybe_convert_platform_interval(values): """ Try to do platform conversion, with special casing for IntervalArray. Wrapper around maybe_convert_platform that alters the default return dtype in certain cases to be compatible with IntervalArray. For example, empty lists return with integer dtype instead of object dtype, which is prohibited for IntervalArray. Parameters ---------- values : array-like Returns ------- array """
    if isinstance(values, (list, tuple)) and len(values) == 0:
        # GH 19016
        # empty lists/tuples get object dtype by default, but this is
        # prohibited for IntervalArray, so coerce to integer instead
        return np.array([], dtype=np.int64)
    elif is_categorical_dtype(values):
        values = np.asarray(values)

    return maybe_convert_platform(values)
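A small hedged sketch of maybe_convert_platform_interval; the printed dtypes assume a 64-bit platform:

import numpy as np

# Empty list -> int64 instead of object dtype, so IntervalArray accepts it
print(maybe_convert_platform_interval([]).dtype)         # int64

# Non-empty input goes through the regular platform conversion
print(maybe_convert_platform_interval([1, 2, 3]).dtype)  # int64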
<SYSTEM_TASK:> Check if the object is a file-like object. <END_TASK> <USER_TASK:> Description: def is_file_like(obj): """ Check if the object is a file-like object. For objects to be considered file-like, they must be an iterator AND have either a `read` and/or `write` method as an attribute. Note: file-like objects must be iterable, but iterable objects need not be file-like. .. versionadded:: 0.20.0 Parameters ---------- obj : The object to check Returns ------- is_file_like : bool Whether `obj` has file-like properties. Examples -------- >>> buffer(StringIO("data")) >>> is_file_like(buffer) True >>> is_file_like([1, 2, 3]) False """
    if not (hasattr(obj, 'read') or hasattr(obj, 'write')):
        return False

    if not hasattr(obj, "__iter__"):
        return False

    return True
<SYSTEM_TASK:> Check if the object is list-like. <END_TASK> <USER_TASK:> Description: def is_list_like(obj, allow_sets=True): """ Check if the object is list-like. Objects that are considered list-like are for example Python lists, tuples, sets, NumPy arrays, and Pandas Series. Strings and datetime objects, however, are not considered list-like. Parameters ---------- obj : The object to check allow_sets : boolean, default True If this parameter is False, sets will not be considered list-like .. versionadded:: 0.24.0 Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_list_like([1, 2, 3]) True >>> is_list_like({1, 2, 3}) True >>> is_list_like(datetime(2017, 1, 1)) False >>> is_list_like("foo") False >>> is_list_like(1) False >>> is_list_like(np.array([2])) True >>> is_list_like(np.array(2))) False """
    return (isinstance(obj, abc.Iterable) and
            # we do not count strings/unicode/bytes as list-like
            not isinstance(obj, (str, bytes)) and

            # exclude zero-dimensional numpy arrays, effectively scalars
            not (isinstance(obj, np.ndarray) and obj.ndim == 0) and

            # exclude sets if allow_sets is False
            not (allow_sets is False and isinstance(obj, abc.Set)))
<SYSTEM_TASK:> Check if the object is list-like, and that all of its elements <END_TASK> <USER_TASK:> Description: def is_nested_list_like(obj): """ Check if the object is list-like, and that all of its elements are also list-like. .. versionadded:: 0.20.0 Parameters ---------- obj : The object to check Returns ------- is_list_like : bool Whether `obj` has list-like properties. Examples -------- >>> is_nested_list_like([[1, 2, 3]]) True >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}]) True >>> is_nested_list_like(["foo"]) False >>> is_nested_list_like([]) False >>> is_nested_list_like([[1, 2, 3], 1]) False Notes ----- This won't reliably detect whether a consumable iterator (e. g. a generator) is a nested-list-like without consuming the iterator. To avoid consuming it, we always return False if the outer container doesn't define `__len__`. See Also -------- is_list_like """
    return (is_list_like(obj) and hasattr(obj, '__len__') and
            len(obj) > 0 and all(is_list_like(item) for item in obj))
<SYSTEM_TASK:> Check if the object is dict-like. <END_TASK> <USER_TASK:> Description: def is_dict_like(obj): """ Check if the object is dict-like. Parameters ---------- obj : The object to check Returns ------- is_dict_like : bool Whether `obj` has dict-like properties. Examples -------- >>> is_dict_like({1: 2}) True >>> is_dict_like([1, 2, 3]) False >>> is_dict_like(dict) False >>> is_dict_like(dict()) True """
    dict_like_attrs = ("__getitem__", "keys", "__contains__")
    return (all(hasattr(obj, attr) for attr in dict_like_attrs)
            # [GH 25196] exclude classes
            and not isinstance(obj, type))
<SYSTEM_TASK:> Check if the object is a sequence of objects. <END_TASK> <USER_TASK:> Description: def is_sequence(obj): """ Check if the object is a sequence of objects. String types are not included as sequences here. Parameters ---------- obj : The object to check Returns ------- is_sequence : bool Whether `obj` is a sequence of objects. Examples -------- >>> l = [1, 2, 3] >>> >>> is_sequence(l) True >>> is_sequence(iter(l)) False """
    try:
        iter(obj)  # Can iterate over it.
        len(obj)   # Has a length associated with it.
        return not isinstance(obj, (str, bytes))
    except (TypeError, AttributeError):
        return False
<SYSTEM_TASK:> Return a fixed frequency DatetimeIndex. <END_TASK> <USER_TASK:> Description: def date_range(start=None, end=None, periods=None, freq=None, tz=None, normalize=False, name=None, closed=None, **kwargs): """ Return a fixed frequency DatetimeIndex. Parameters ---------- start : str or datetime-like, optional Left bound for generating dates. end : str or datetime-like, optional Right bound for generating dates. periods : integer, optional Number of periods to generate. freq : str or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5H'. See :ref:`here <timeseries.offset_aliases>` for a list of frequency aliases. tz : str or tzinfo, optional Time zone name for returning localized DatetimeIndex, for example 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is timezone-naive. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : str, default None Name of the resulting DatetimeIndex. closed : {None, 'left', 'right'}, optional Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None, the default). **kwargs For compatibility. Has no effect on the result. Returns ------- rng : DatetimeIndex See Also -------- DatetimeIndex : An immutable container for datetimes. timedelta_range : Return a fixed frequency TimedeltaIndex. period_range : Return a fixed frequency PeriodIndex. interval_range : Return a fixed frequency IntervalIndex. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``DatetimeIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- **Specifying the values** The next four examples generate the same `DatetimeIndex`, but vary the combination of `start`, `end` and `periods`. Specify `start` and `end`, with the default daily frequency. >>> pd.date_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], dtype='datetime64[ns]', freq='D') Specify `start` and `periods`, the number of periods (days). >>> pd.date_range(start='1/1/2018', periods=8) DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'], dtype='datetime64[ns]', freq='D') Specify `end` and `periods`, the number of periods (days). >>> pd.date_range(end='1/1/2018', periods=8) DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28', '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'], dtype='datetime64[ns]', freq='D') Specify `start`, `end`, and `periods`; the frequency is generated automatically (linearly spaced). >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3) DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00', '2018-04-27 00:00:00'], dtype='datetime64[ns]', freq=None) **Other Parameters** Changed the `freq` (frequency) to ``'M'`` (month end frequency). 
>>> pd.date_range(start='1/1/2018', periods=5, freq='M') DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30', '2018-05-31'], dtype='datetime64[ns]', freq='M') Multiples are allowed >>> pd.date_range(start='1/1/2018', periods=5, freq='3M') DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', '2019-01-31'], dtype='datetime64[ns]', freq='3M') `freq` can also be specified as an Offset object. >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)) DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31', '2019-01-31'], dtype='datetime64[ns]', freq='3M') Specify `tz` to set the timezone. >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo') DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00', '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00', '2018-01-05 00:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='D') `closed` controls whether to include `start` and `end` that are on the boundary. The default includes boundary points on either end. >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None) DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') Use ``closed='left'`` to exclude `end` if it falls on the boundary. >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left') DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq='D') Use ``closed='right'`` to exclude `start` if it falls on the boundary. >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right') DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq='D') """
    if freq is None and com._any_none(periods, start, end):
        freq = 'D'

    dtarr = DatetimeArray._generate_range(
        start=start, end=end, periods=periods,
        freq=freq, tz=tz, normalize=normalize,
        closed=closed, **kwargs)

    return DatetimeIndex._simple_new(
        dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name)
<SYSTEM_TASK:> Return a fixed frequency DatetimeIndex, with business day as the default <END_TASK> <USER_TASK:> Description: def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, normalize=True, name=None, weekmask=None, holidays=None, closed=None, **kwargs): """ Return a fixed frequency DatetimeIndex, with business day as the default frequency Parameters ---------- start : string or datetime-like, default None Left bound for generating dates. end : string or datetime-like, default None Right bound for generating dates. periods : integer, default None Number of periods to generate. freq : string or DateOffset, default 'B' (business daily) Frequency strings can have multiples, e.g. '5H'. tz : string or None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing. normalize : bool, default False Normalize start/end dates to midnight before generating date range. name : string, default None Name of the resulting DatetimeIndex. weekmask : string or None, default None Weekmask of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. The default value None is equivalent to 'Mon Tue Wed Thu Fri'. .. versionadded:: 0.21.0 holidays : list-like or None, default None Dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar``, only used when custom frequency strings are passed. .. versionadded:: 0.21.0 closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None). **kwargs For compatibility. Has no effect on the result. Returns ------- DatetimeIndex Notes ----- Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. Specifying ``freq`` is a requirement for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not desired. To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Note how the two weekend days are skipped in the result. >>> pd.bdate_range(start='1/1/2018', end='1/08/2018') DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04', '2018-01-05', '2018-01-08'], dtype='datetime64[ns]', freq='B') """
if freq is None: msg = 'freq must be specified for bdate_range; use date_range instead' raise TypeError(msg) if is_string_like(freq) and freq.startswith('C'): try: weekmask = weekmask or 'Mon Tue Wed Thu Fri' freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask) except (KeyError, TypeError): msg = 'invalid custom frequency string: {freq}'.format(freq=freq) raise ValueError(msg) elif holidays or weekmask: msg = ('a custom frequency string is required when holidays or ' 'weekmask are passed, got frequency {freq}').format(freq=freq) raise ValueError(msg) return date_range(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, closed=closed, **kwargs)
<SYSTEM_TASK:> Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the <END_TASK> <USER_TASK:> Description: def cdate_range(start=None, end=None, periods=None, freq='C', tz=None, normalize=True, name=None, closed=None, **kwargs): """ Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the default frequency .. deprecated:: 0.21.0 Parameters ---------- start : string or datetime-like, default None Left bound for generating dates end : string or datetime-like, default None Right bound for generating dates periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'C' (CustomBusinessDay) Frequency strings can have multiples, e.g. '5H' tz : string, default None Time zone name for returning localized DatetimeIndex, for example Asia/Beijing normalize : bool, default False Normalize start/end dates to midnight before generating date range name : string, default None Name of the resulting DatetimeIndex weekmask : string, Default 'Mon Tue Wed Thu Fri' weekmask of valid business days, passed to ``numpy.busdaycalendar`` holidays : list list/array of dates to exclude from the set of valid business days, passed to ``numpy.busdaycalendar`` closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Notes ----- Of the three parameters: ``start``, ``end``, and ``periods``, exactly two must be specified. To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Returns ------- rng : DatetimeIndex """
warnings.warn("cdate_range is deprecated and will be removed in a future " "version, instead use pd.bdate_range(..., freq='{freq}')" .format(freq=freq), FutureWarning, stacklevel=2) if freq == 'C': holidays = kwargs.pop('holidays', []) weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri') freq = CDay(holidays=holidays, weekmask=weekmask) return date_range(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, closed=closed, **kwargs)
<SYSTEM_TASK:> Split data into blocks & return conformed data. <END_TASK> <USER_TASK:> Description: def _create_blocks(self): """ Split data into blocks & return conformed data. """
obj, index = self._convert_freq() if index is not None: index = self._on # filter out the on from the object if self.on is not None: if obj.ndim == 2: obj = obj.reindex(columns=obj.columns.difference([self.on]), copy=False) blocks = obj._to_dict_of_blocks(copy=False).values() return blocks, obj, index
<SYSTEM_TASK:> Wrap a single result. <END_TASK> <USER_TASK:> Description: def _wrap_result(self, result, block=None, obj=None): """ Wrap a single result. """
if obj is None: obj = self._selected_obj index = obj.index if isinstance(result, np.ndarray): # coerce if necessary if block is not None: if is_timedelta64_dtype(block.values.dtype): from pandas import to_timedelta result = to_timedelta( result.ravel(), unit='ns').values.reshape(result.shape) if result.ndim == 1: from pandas import Series return Series(result, index, name=obj.name) return type(obj)(result, index=index, columns=block.columns) return result
<SYSTEM_TASK:> Wrap the results. <END_TASK> <USER_TASK:> Description: def _wrap_results(self, results, blocks, obj): """ Wrap the results. Parameters ---------- results : list of ndarrays blocks : list of blocks obj : conformed data (may be resampled) """
from pandas import Series, concat from pandas.core.index import ensure_index final = [] for result, block in zip(results, blocks): result = self._wrap_result(result, block=block, obj=obj) if result.ndim == 1: return result final.append(result) # if we have an 'on' column # we want to put it back into the results # in the same location columns = self._selected_obj.columns if self.on is not None and not self._on.equals(obj.index): name = self._on.name final.append(Series(self._on, index=obj.index, name=name)) if self._selection is not None: selection = ensure_index(self._selection) # need to reorder to include original location of # the on column (if its not already there) if name not in selection: columns = self.obj.columns indexer = columns.get_indexer(selection.tolist() + [name]) columns = columns.take(sorted(indexer)) if not len(final): return obj.astype('float64') return concat(final, axis=1).reindex(columns=columns, copy=False)
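The re-insertion of the `on` column is easiest to see from the public rolling API; a small illustrative sketch with toy data:

import pandas as pd

df = pd.DataFrame({'time': pd.date_range('2018-01-01', periods=5, freq='s'),
                   'x': [0.0, 1.0, 2.0, 3.0, 4.0]})

# 'time' drives the window but is not aggregated; _wrap_results puts it
# back into the output next to the rolled column
df.rolling('2s', on='time').sum()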
<SYSTEM_TASK:> Center the result in the window. <END_TASK> <USER_TASK:> Description: def _center_window(self, result, window): """ Center the result in the window. """
if self.axis > result.ndim - 1: raise ValueError("Requested axis is larger then no. of argument " "dimensions") offset = _offset(window, True) if offset > 0: if isinstance(result, (ABCSeries, ABCDataFrame)): result = result.slice_shift(-offset, axis=self.axis) else: lead_indexer = [slice(None)] * result.ndim lead_indexer[self.axis] = slice(offset, None) result = np.copy(result[tuple(lead_indexer)]) return result
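A quick sketch of what the offset shift amounts to, using the public API on toy data:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])

# label sits at the right edge of each window
s.rolling(window=3).mean()

# center=True shifts the result back by _offset(window, True) positions
# so the label sits in the middle of the window
s.rolling(window=3, center=True).mean()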
<SYSTEM_TASK:> Provide validation for our window type, return the window <END_TASK> <USER_TASK:> Description: def _prep_window(self, **kwargs): """ Provide validation for our window type, return the window we have already been validated. """
window = self._get_window() if isinstance(window, (list, tuple, np.ndarray)): return com.asarray_tuplesafe(window).astype(float) elif is_integer(window): import scipy.signal as sig # the below may pop from kwargs def _validate_win_type(win_type, kwargs): arg_map = {'kaiser': ['beta'], 'gaussian': ['std'], 'general_gaussian': ['power', 'width'], 'slepian': ['width']} if win_type in arg_map: return tuple([win_type] + _pop_args(win_type, arg_map[win_type], kwargs)) return win_type def _pop_args(win_type, arg_names, kwargs): msg = '%s window requires %%s' % win_type all_args = [] for n in arg_names: if n not in kwargs: raise ValueError(msg % n) all_args.append(kwargs.pop(n)) return all_args win_type = _validate_win_type(self.win_type, kwargs) # GH #15662. `False` makes symmetric window, rather than periodic. return sig.get_window(win_type, window, False).astype(float)
<SYSTEM_TASK:> Applies a moving window of type ``window_type`` on the data. <END_TASK> <USER_TASK:> Description: def _apply_window(self, mean=True, **kwargs): """ Applies a moving window of type ``window_type`` on the data. Parameters ---------- mean : bool, default True If True computes weighted mean, else weighted sum Returns ------- y : same type as input argument """
window = self._prep_window(**kwargs) center = self.center blocks, obj, index = self._create_blocks() results = [] for b in blocks: try: values = self._prep_values(b.values) except TypeError: results.append(b.values.copy()) continue if values.size == 0: results.append(values.copy()) continue offset = _offset(window, center) additional_nans = np.array([np.NaN] * offset) def f(arg, *args, **kwargs): minp = _use_window(self.min_periods, len(window)) return libwindow.roll_window(np.concatenate((arg, additional_nans)) if center else arg, window, minp, avg=mean) result = np.apply_along_axis(f, self.axis, values) if center: result = self._center_window(result, window) results.append(result) return self._wrap_results(results, blocks, obj)
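The weighted path is driven by scipy; a hedged sketch of the equivalent public call (window length and std chosen arbitrarily, scipy required):

import pandas as pd
import scipy.signal as sig

# the weights _prep_window would hand to roll_window for a gaussian window
sig.get_window(('gaussian', 2.0), 5, False)

# the same computation through the public API; the extra keyword ('std')
# is popped from kwargs by _validate_win_type/_pop_args above
pd.Series(range(10)).rolling(5, win_type='gaussian').mean(std=2.0)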
<SYSTEM_TASK:> Dispatch to apply; we are stripping all of the _apply kwargs and <END_TASK> <USER_TASK:> Description: def _apply(self, func, name, window=None, center=None, check_minp=None, **kwargs): """ Dispatch to apply; we are stripping all of the _apply kwargs and performing the original function call on the grouped object. """
def f(x, name=name, *args): x = self._shallow_copy(x) if isinstance(name, str): return getattr(x, name)(*args, **kwargs) return x.apply(name, *args, **kwargs) return self._groupby.apply(f)
<SYSTEM_TASK:> Rolling statistical measure using supplied function. <END_TASK> <USER_TASK:> Description: def _apply(self, func, name=None, window=None, center=None, check_minp=None, **kwargs): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : str/callable to apply name : str, optional name of this function window : int/array, default to _get_window() center : bool, default to self.center check_minp : function, default to _use_window Returns ------- y : type of input """
if center is None: center = self.center if window is None: window = self._get_window() if check_minp is None: check_minp = _use_window blocks, obj, index = self._create_blocks() index, indexi = self._get_index(index=index) results = [] for b in blocks: values = self._prep_values(b.values) if values.size == 0: results.append(values.copy()) continue # if we have a string function name, wrap it if isinstance(func, str): cfunc = getattr(libwindow, func, None) if cfunc is None: raise ValueError("we do not support this function " "in libwindow.{func}".format(func=func)) def func(arg, window, min_periods=None, closed=None): minp = check_minp(min_periods, window) # ensure we are only rolling on floats arg = ensure_float64(arg) return cfunc(arg, window, minp, indexi, closed, **kwargs) # calculation function if center: offset = _offset(window, center) additional_nans = np.array([np.NaN] * offset) def calc(x): return func(np.concatenate((x, additional_nans)), window, min_periods=self.min_periods, closed=self.closed) else: def calc(x): return func(x, window, min_periods=self.min_periods, closed=self.closed) with np.errstate(all='ignore'): if values.ndim > 1: result = np.apply_along_axis(calc, self.axis, values) else: result = calc(values) if center: result = self._center_window(result, window) results.append(result) return self._wrap_results(results, blocks, obj)
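For orientation, the string-function branch above is what backs the ordinary rolling reductions; a toy example:

import pandas as pd
import numpy as np

s = pd.Series([1.0, 2.0, np.nan, 4.0, 5.0])

# 'sum' is looked up on libwindow and wrapped with the min_periods check;
# min_periods=1 lets the first label already produce a value
s.rolling(window=3, min_periods=1).sum()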
<SYSTEM_TASK:> Get the window length over which to perform some operation. <END_TASK> <USER_TASK:> Description: def _get_window(self, other=None): """ Get the window length over which to perform some operation. Parameters ---------- other : object, default None The other object that is involved in the operation. Such an object is involved for operations like covariance. Returns ------- window : int The window length. """
axis = self.obj._get_axis(self.axis) length = len(axis) + (other is not None) * len(axis) other = self.min_periods or -1 return max(length, other)
<SYSTEM_TASK:> Rolling statistical measure using supplied function. Designed to be <END_TASK> <USER_TASK:> Description: def _apply(self, func, **kwargs): """ Rolling statistical measure using supplied function. Designed to be used with passed-in Cython array-based functions. Parameters ---------- func : str/callable to apply Returns ------- y : same type as input argument """
blocks, obj, index = self._create_blocks() results = [] for b in blocks: try: values = self._prep_values(b.values) except TypeError: results.append(b.values.copy()) continue if values.size == 0: results.append(values.copy()) continue # if we have a string function name, wrap it if isinstance(func, str): cfunc = getattr(libwindow, func, None) if cfunc is None: raise ValueError("we do not support this function " "in libwindow.{func}".format(func=func)) def func(arg): return cfunc(arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods)) results.append(np.apply_along_axis(func, self.axis, values)) return self._wrap_results(results, blocks, obj)
<SYSTEM_TASK:> Exponential weighted moving average. <END_TASK> <USER_TASK:> Description: def mean(self, *args, **kwargs): """ Exponential weighted moving average. Parameters ---------- *args, **kwargs Arguments and keyword arguments to be passed into func. """
nv.validate_window_func('mean', args, kwargs) return self._apply('ewma', **kwargs)
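A minimal usage sketch of the ewma kernel through the public API (values arbitrary):

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])

# com=0.5 corresponds to alpha = 1 / (1 + com) = 2/3
s.ewm(com=0.5).mean()

# adjust / ignore_na / min_periods are the flags forwarded to the kernel
s.ewm(com=0.5, adjust=False, min_periods=2).mean()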
<SYSTEM_TASK:> Exponential weighted moving stddev. <END_TASK> <USER_TASK:> Description: def std(self, bias=False, *args, **kwargs): """ Exponential weighted moving stddev. """
nv.validate_window_func('std', args, kwargs) return _zsqrt(self.var(bias=bias, **kwargs))
<SYSTEM_TASK:> Exponential weighted moving variance. <END_TASK> <USER_TASK:> Description: def var(self, bias=False, *args, **kwargs): """ Exponential weighted moving variance. """
nv.validate_window_func('var', args, kwargs) def f(arg): return libwindow.ewmcov(arg, arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), int(bias)) return self._apply(f, **kwargs)
<SYSTEM_TASK:> Exponential weighted sample covariance. <END_TASK> <USER_TASK:> Description: def cov(self, other=None, pairwise=None, bias=False, **kwargs): """ Exponential weighted sample covariance. """
if other is None: other = self._selected_obj # only default unset pairwise = True if pairwise is None else pairwise other = self._shallow_copy(other) def _get_cov(X, Y): X = self._shallow_copy(X) Y = self._shallow_copy(Y) cov = libwindow.ewmcov(X._prep_values(), Y._prep_values(), self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), int(bias)) return X._wrap_result(cov) return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_cov, pairwise=bool(pairwise))
<SYSTEM_TASK:> Exponential weighted sample correlation. <END_TASK> <USER_TASK:> Description: def corr(self, other=None, pairwise=None, **kwargs): """ Exponential weighted sample correlation. """
if other is None: other = self._selected_obj # only default unset pairwise = True if pairwise is None else pairwise other = self._shallow_copy(other) def _get_corr(X, Y): X = self._shallow_copy(X) Y = self._shallow_copy(Y) def _cov(x, y): return libwindow.ewmcov(x, y, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), 1) x_values = X._prep_values() y_values = Y._prep_values() with np.errstate(all='ignore'): cov = _cov(x_values, y_values) x_var = _cov(x_values, x_values) y_var = _cov(y_values, y_values) corr = cov / _zsqrt(x_var * y_var) return X._wrap_result(corr) return _flex_binary_moment(self._selected_obj, other._selected_obj, _get_corr, pairwise=bool(pairwise))
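Both the covariance and correlation paths go through _flex_binary_moment; a small sketch with invented data:

import pandas as pd
import numpy as np

np.random.seed(0)
df = pd.DataFrame(np.random.randn(20, 2), columns=['a', 'b'])

# Series vs Series: one correlation value per row
df['a'].ewm(span=10).corr(df['b'])

# DataFrame with pairwise=True: every column pair, MultiIndexed result
df.ewm(span=10).cov(pairwise=True)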
<SYSTEM_TASK:> Makes sure that time and panels are conformable. <END_TASK> <USER_TASK:> Description: def _ensure_like_indices(time, panels): """ Makes sure that time and panels are conformable. """
n_time = len(time) n_panel = len(panels) u_panels = np.unique(panels) # this sorts! u_time = np.unique(time) if len(u_time) == n_time: time = np.tile(u_time, len(u_panels)) if len(u_panels) == n_panel: panels = np.repeat(u_panels, len(u_time)) return time, panels
<SYSTEM_TASK:> Returns a multi-index suitable for a panel-like DataFrame. <END_TASK> <USER_TASK:> Description: def panel_index(time, panels, names=None): """ Returns a multi-index suitable for a panel-like DataFrame. Parameters ---------- time : array-like Time index, does not have to repeat panels : array-like Panel index, does not have to repeat names : list, optional List containing the names of the indices Returns ------- multi_index : MultiIndex Time index is the first level, the panels are the second level. Examples -------- >>> years = range(1960,1963) >>> panels = ['A', 'B', 'C'] >>> panel_idx = panel_index(years, panels) >>> panel_idx MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'), (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'), (1962, 'C')], dtype=object) or >>> years = np.repeat(range(1960,1963), 3) >>> panels = np.tile(['A', 'B', 'C'], 3) >>> panel_idx = panel_index(years, panels) >>> panel_idx MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'), (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'), (1962, 'C')], dtype=object) """
if names is None: names = ['time', 'panel'] time, panels = _ensure_like_indices(time, panels) return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
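panel_index (and Panel itself) was removed in later pandas versions; an approximately equivalent index can be built with the public MultiIndex constructors, e.g.:

import pandas as pd

years = range(1960, 1963)
panels = ['A', 'B', 'C']

# every (time, panel) combination with time as the first level -- the same
# shape of index panel_index produces when the inputs do not already repeat
idx = pd.MultiIndex.from_product([years, panels], names=['time', 'panel'])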
<SYSTEM_TASK:> Construct Panel from dict of DataFrame objects. <END_TASK> <USER_TASK:> Description: def from_dict(cls, data, intersect=False, orient='items', dtype=None): """ Construct Panel from dict of DataFrame objects. Parameters ---------- data : dict {field : DataFrame} intersect : boolean Intersect indexes of input DataFrames orient : {'items', 'minor'}, default 'items' The "orientation" of the data. If the keys of the passed dict should be the items of the result panel, pass 'items' (default). Otherwise if the columns of the values of the passed DataFrame objects should be the items (which in the case of mixed-dtype data you should do), instead pass 'minor' dtype : dtype, default None Data type to force, otherwise infer Returns ------- Panel """
from collections import defaultdict orient = orient.lower() if orient == 'minor': new_data = defaultdict(OrderedDict) for col, df in data.items(): for item, s in df.items(): new_data[item][col] = s data = new_data elif orient != 'items': # pragma: no cover raise ValueError('Orientation must be one of {items, minor}.') d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype) ks = list(d['data'].keys()) if not isinstance(d['data'], OrderedDict): ks = list(sorted(ks)) d[cls._info_axis_name] = Index(ks) return cls(**d)
<SYSTEM_TASK:> Write each DataFrame in Panel to a separate excel sheet. <END_TASK> <USER_TASK:> Description: def to_excel(self, path, na_rep='', engine=None, **kwargs): """ Write each DataFrame in Panel to a separate excel sheet. Parameters ---------- path : string or ExcelWriter object File path or existing ExcelWriter na_rep : string, default '' Missing data representation engine : string, default None write engine to use - you can also set this via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and ``io.excel.xlsm.writer``. Other Parameters ---------------- float_format : string, default None Format string for floating point numbers cols : sequence, optional Columns to write header : boolean or list of string, default True Write out column names. If a list of string is given it is assumed to be aliases for the column names index : boolean, default True Write row names (index) index_label : string or sequence, default None Column label for index column(s) if desired. If None is given, and `header` and `index` are True, then the index names are used. A sequence should be given if the DataFrame uses MultiIndex. startrow : upper left cell row to dump data frame startcol : upper left cell column to dump data frame Notes ----- Keyword arguments (and na_rep) are passed to the ``to_excel`` method for each DataFrame written. """
from pandas.io.excel import ExcelWriter if isinstance(path, str): writer = ExcelWriter(path, engine=engine) else: writer = path kwargs['na_rep'] = na_rep for item, df in self.iteritems(): name = str(item) df.to_excel(writer, name, **kwargs) writer.save()
<SYSTEM_TASK:> Unpickle the panel. <END_TASK> <USER_TASK:> Description: def _unpickle_panel_compat(self, state): # pragma: no cover """ Unpickle the panel. """
from pandas.io.pickle import _unpickle_array _unpickle = _unpickle_array vals, items, major, minor = state items = _unpickle(items) major = _unpickle(major) minor = _unpickle(minor) values = _unpickle(vals) wp = Panel(values, items, major, minor) self._data = wp._data
<SYSTEM_TASK:> Conform input DataFrame to align with chosen axis pair. <END_TASK> <USER_TASK:> Description: def conform(self, frame, axis='items'): """ Conform input DataFrame to align with chosen axis pair. Parameters ---------- frame : DataFrame axis : {'items', 'major', 'minor'} Axis the input corresponds to. E.g., if axis='major', then the frame's columns would be items, and the index would be values of the minor axis Returns ------- DataFrame """
axes = self._get_plane_axes(axis) return frame.reindex(**self._extract_axes_for_slice(self, axes))
<SYSTEM_TASK:> Round each value in Panel to a specified number of decimal places. <END_TASK> <USER_TASK:> Description: def round(self, decimals=0, *args, **kwargs): """ Round each value in Panel to a specified number of decimal places. .. versionadded:: 0.18.0 Parameters ---------- decimals : int Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Panel object See Also -------- numpy.around """
nv.validate_round(args, kwargs) if is_integer(decimals): result = np.apply_along_axis(np.round, 0, self.values) return self._wrap_result(result, axis=0) raise TypeError("decimals must be an integer")
<SYSTEM_TASK:> Drop 2D from panel, holding passed axis constant. <END_TASK> <USER_TASK:> Description: def dropna(self, axis=0, how='any', inplace=False): """ Drop 2D from panel, holding passed axis constant. Parameters ---------- axis : int, default 0 Axis to hold constant. E.g. axis=1 will drop major_axis entries having a certain amount of NA data how : {'all', 'any'}, default 'any' 'any': one or more values are NA in the DataFrame along the axis. For 'all' they all must be. inplace : bool, default False If True, do operation inplace and return None. Returns ------- dropped : Panel """
axis = self._get_axis_number(axis) values = self.values mask = notna(values) for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})): mask = mask.sum(ax) per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:]) if how == 'all': cond = mask > 0 else: cond = mask == per_slice new_ax = self._get_axis(axis)[cond] result = self.reindex_axis(new_ax, axis=axis) if inplace: self._update_inplace(result) else: return result
<SYSTEM_TASK:>
Return slice of panel along selected axis.
<END_TASK>
<USER_TASK:>
Description:
def xs(self, key, axis=1):
    """
    Return slice of panel along selected axis.

    Parameters
    ----------
    key : object
        Label
    axis : {'items', 'major', 'minor'}, default 1/'major'

    Returns
    -------
    y : ndim(self)-1

    Notes
    -----
    xs is only for getting, not setting values.

    MultiIndex Slicers is a generic way to get/set values on any level or
    levels and is a superset of xs functionality, see
    :ref:`MultiIndex Slicers <advanced.mi_slicers>`
    """
axis = self._get_axis_number(axis) if axis == 0: return self[key] self._consolidate_inplace() axis_number = self._get_axis_number(axis) new_data = self._data.xs(key, axis=axis_number, copy=False) result = self._construct_return_type(new_data) copy = new_data.is_mixed_type result._set_is_copy(self, copy=copy) return result
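The same cross-section idea survives on DataFrame/Series; a hedged sketch with a toy MultiIndex:

import pandas as pd

idx = pd.MultiIndex.from_product([['A', 'B'], [1, 2]], names=['grp', 'n'])
df = pd.DataFrame({'val': [0, 1, 2, 3]}, index=idx)

# slice along one level; the result has one fewer index level,
# mirroring the ndim(self)-1 return described above
df.xs('A', level='grp')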
<SYSTEM_TASK:> Handle 2-d slices, equiv to iterating over the other axis. <END_TASK> <USER_TASK:> Description: def _apply_2d(self, func, axis): """ Handle 2-d slices, equiv to iterating over the other axis. """
ndim = self.ndim axis = [self._get_axis_number(a) for a in axis] # construct slabs, in 2-d this is a DataFrame result indexer_axis = list(range(ndim)) for a in axis: indexer_axis.remove(a) indexer_axis = indexer_axis[0] slicer = [slice(None, None)] * ndim ax = self._get_axis(indexer_axis) results = [] for i, e in enumerate(ax): slicer[indexer_axis] = i sliced = self.iloc[tuple(slicer)] obj = func(sliced) results.append((e, obj)) return self._construct_return_type(dict(results))
<SYSTEM_TASK:> Return the type for the ndim of the result. <END_TASK> <USER_TASK:> Description: def _construct_return_type(self, result, axes=None): """ Return the type for the ndim of the result. """
ndim = getattr(result, 'ndim', None) # need to assume they are the same if ndim is None: if isinstance(result, dict): ndim = getattr(list(result.values())[0], 'ndim', 0) # have a dict, so top-level is +1 dim if ndim != 0: ndim += 1 # scalar if ndim == 0: return Series(result) # same as self elif self.ndim == ndim: # return the construction dictionary for these axes if axes is None: return self._constructor(result) return self._constructor(result, **self._construct_axes_dict()) # sliced elif self.ndim == ndim + 1: if axes is None: return self._constructor_sliced(result) return self._constructor_sliced( result, **self._extract_axes_for_slice(self, axes)) raise ValueError('invalid _construct_return_type [self->{self}] ' '[result->{result}]'.format(self=self, result=result))
<SYSTEM_TASK:> Return number of observations over requested axis. <END_TASK> <USER_TASK:> Description: def count(self, axis='major'): """ Return number of observations over requested axis. Parameters ---------- axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- count : DataFrame """
i = self._get_axis_number(axis) values = self.values mask = np.isfinite(values) result = mask.sum(axis=i, dtype='int64') return self._wrap_result(result, axis)
<SYSTEM_TASK:> Shift index by desired number of periods with an optional time freq. <END_TASK> <USER_TASK:> Description: def shift(self, periods=1, freq=None, axis='major'): """ Shift index by desired number of periods with an optional time freq. The shifted data will not include the dropped periods and the shifted axis will be smaller than the original. This is different from the behavior of DataFrame.shift() Parameters ---------- periods : int Number of periods to move, can be positive or negative freq : DateOffset, timedelta, or time rule string, optional axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- shifted : Panel """
if freq: return self.tshift(periods, freq, axis=axis) return super().slice_shift(periods, axis=axis)
<SYSTEM_TASK:> Join items with other Panel either on major and minor axes column. <END_TASK> <USER_TASK:> Description: def join(self, other, how='left', lsuffix='', rsuffix=''): """ Join items with other Panel either on major and minor axes column. Parameters ---------- other : Panel or list of Panels Index should be similar to one of the columns in this one how : {'left', 'right', 'outer', 'inner'} How to handle indexes of the two objects. Default: 'left' for joining on index, None otherwise * left: use calling frame's index * right: use input frame's index * outer: form union of indexes * inner: use intersection of indexes lsuffix : string Suffix to use from left frame's overlapping columns rsuffix : string Suffix to use from right frame's overlapping columns Returns ------- joined : Panel """
from pandas.core.reshape.concat import concat if isinstance(other, Panel): join_major, join_minor = self._get_join_index(other, how) this = self.reindex(major=join_major, minor=join_minor) other = other.reindex(major=join_major, minor=join_minor) merged_data = this._data.merge(other._data, lsuffix, rsuffix) return self._constructor(merged_data) else: if lsuffix or rsuffix: raise ValueError('Suffixes not supported when passing ' 'multiple panels') if how == 'left': how = 'outer' join_axes = [self.major_axis, self.minor_axis] elif how == 'right': raise ValueError('Right join not supported with multiple ' 'panels') else: join_axes = None return concat([self] + list(other), axis=0, join=how, join_axes=join_axes, verify_integrity=True)
<SYSTEM_TASK:>
Modify Panel in place using non-NA values from other Panel.
<END_TASK>
<USER_TASK:>
Description:
def update(self, other, join='left', overwrite=True, filter_func=None,
           errors='ignore'):
    """
    Modify Panel in place using non-NA values from other Panel.

    May also use object coercible to Panel. Will align on items.

    Parameters
    ----------
    other : Panel, or object coercible to Panel
        The object from which the caller will be updated.
    join : {'left', 'right', 'outer', 'inner'}, default 'left'
        How individual DataFrames are joined.
    overwrite : bool, default True
        If True then overwrite values for common keys in the calling Panel.
    filter_func : callable(1d-array) -> 1d-array<bool>, default None
        Can choose to replace values other than NA. Return True for values
        that should be updated.
    errors : {'raise', 'ignore'}, default 'ignore'
        If 'raise', will raise an error if a DataFrame and other both
        contain non-NA data in the same place.

        .. versionchanged :: 0.24.0
            Changed from `raise_conflict=False|True`
            to `errors='ignore'|'raise'`.

    See Also
    --------
    DataFrame.update : Similar method for DataFrames.
    dict.update : Similar method for dictionaries.
    """
if not isinstance(other, self._constructor): other = self._constructor(other) axis_name = self._info_axis_name axis_values = self._info_axis other = other.reindex(**{axis_name: axis_values}) for frame in axis_values: self[frame].update(other[frame], join=join, overwrite=overwrite, filter_func=filter_func, errors=errors)
<SYSTEM_TASK:> Return a list of the axis indices. <END_TASK> <USER_TASK:> Description: def _extract_axes(self, data, axes, **kwargs): """ Return a list of the axis indices. """
return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)]
<SYSTEM_TASK:> Return the slice dictionary for these axes. <END_TASK> <USER_TASK:> Description: def _extract_axes_for_slice(self, axes): """ Return the slice dictionary for these axes. """
return {self._AXIS_SLICEMAP[i]: a for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)}
<SYSTEM_TASK:> reconstruct labels from observed group ids <END_TASK> <USER_TASK:> Description: def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull): """ reconstruct labels from observed group ids Parameters ---------- xnull: boolean, if nulls are excluded; i.e. -1 labels are passed through """
if not xnull: lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8') shape = np.asarray(shape, dtype='i8') + lift if not is_int64_overflow_possible(shape): # obs ids are deconstructable! take the fast route! out = decons_group_index(obs_ids, shape) return out if xnull or not lift.any() \ else [x - y for x, y in zip(out, lift)] i = unique_label_indices(comp_ids) i8copy = lambda a: a.astype('i8', subok=False, copy=True) return [i8copy(lab[i]) for lab in labels]
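The "deconstructable" fast path works because observed group ids are a mixed-radix encoding of the per-level codes; numpy shows the same round trip (toy shapes and labels):

import numpy as np

shape = (3, 4)                      # sizes of the two grouping levels
labels = (np.array([0, 2, 1]),      # level-0 codes per row
          np.array([3, 0, 2]))      # level-1 codes per row

# forward: encode each label tuple into a single group id
group_ids = np.ravel_multi_index(labels, shape)

# backward: recover the per-level codes, as the fast path above does
np.unravel_index(group_ids, shape)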
<SYSTEM_TASK:>
Attempt to prevent foot-shooting in a helpful way.
<END_TASK>
<USER_TASK:>
Description:
def _check_ne_builtin_clash(expr):
    """Attempt to prevent foot-shooting in a helpful way.

    Parameters
    ----------
    expr : Expr
        The parsed expression; its variable names are checked for
        clashes with the numexpr builtins.
    """
names = expr.names overlap = names & _ne_builtins if overlap: s = ', '.join(map(repr, overlap)) raise NumExprClobberingError('Variables in expression "{expr}" ' 'overlap with builtins: ({s})' .format(expr=expr, s=s))
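The clash this guards against can be triggered from the public API when numexpr is installed; the column name below is chosen to collide on purpose:

import pandas as pd

df = pd.DataFrame({'sin': [1.0, 2.0], 'a': [3.0, 4.0]})

# 'sin' is a numexpr builtin, so the numexpr engine refuses to evaluate
try:
    df.query('sin > 1', engine='numexpr')
except Exception as exc:  # NumExprClobberingError
    print(type(exc).__name__, exc)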
<SYSTEM_TASK:> Run the engine on the expression <END_TASK> <USER_TASK:> Description: def evaluate(self): """Run the engine on the expression This method performs alignment which is necessary no matter what engine is being used, thus its implementation is in the base class. Returns ------- obj : object The result of the passed expression. """
if not self._is_aligned: self.result_type, self.aligned_axes = _align(self.expr.terms) # make sure no names in resolvers and locals/globals clash res = self._evaluate() return _reconstruct_object(self.result_type, res, self.aligned_axes, self.expr.terms.return_type)
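The alignment step is visible from pd.eval itself; a toy sketch:

import pandas as pd

a = pd.Series([1.0, 2.0, 3.0], index=['x', 'y', 'z'])
b = pd.Series([10.0, 20.0], index=['y', 'z'])

# the terms are aligned on the union of their indexes before the engine
# runs, exactly as the plain binary operation would align them
pd.eval('a + b')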
<SYSTEM_TASK:> Find the appropriate Block subclass to use for the given values and dtype. <END_TASK> <USER_TASK:> Description: def get_block_type(values, dtype=None): """ Find the appropriate Block subclass to use for the given values and dtype. Parameters ---------- values : ndarray-like dtype : numpy or pandas dtype Returns ------- cls : class, subclass of Block """
dtype = dtype or values.dtype
vtype = dtype.type

if is_sparse(dtype):
    # Need this first(ish) so that Sparse[datetime] is sparse
    cls = ExtensionBlock
elif is_categorical(values):
    cls = CategoricalBlock
elif issubclass(vtype, np.datetime64):
    assert not is_datetime64tz_dtype(values)
    cls = DatetimeBlock
elif is_datetime64tz_dtype(values):
    cls = DatetimeTZBlock
elif is_interval_dtype(dtype) or is_period_dtype(dtype):
    cls = ObjectValuesExtensionBlock
elif is_extension_array_dtype(values):
    cls = ExtensionBlock
elif issubclass(vtype, np.floating):
    cls = FloatBlock
elif issubclass(vtype, np.timedelta64):
    assert not is_datetime64tz_dtype(values)
    cls = TimeDeltaBlock
elif issubclass(vtype, np.complexfloating):
    cls = ComplexBlock
elif issubclass(vtype, np.integer):
    cls = IntBlock
elif dtype == np.bool_:
    cls = BoolBlock
else:
    cls = ObjectBlock
return cls
<SYSTEM_TASK:>
return a new list of extended blocks, given the result
<END_TASK>
<USER_TASK:>
Description:
def _extend_blocks(result, blocks=None):
    """ return a new list of extended blocks, given the result """
from pandas.core.internals import BlockManager if blocks is None: blocks = [] if isinstance(result, list): for r in result: if isinstance(r, list): blocks.extend(r) else: blocks.append(r) elif isinstance(result, BlockManager): blocks.extend(result.blocks) else: blocks.append(result) return blocks
<SYSTEM_TASK:> guarantee the shape of the values to be at least 1 d <END_TASK> <USER_TASK:> Description: def _block_shape(values, ndim=1, shape=None): """ guarantee the shape of the values to be at least 1 d """
if values.ndim < ndim: if shape is None: shape = values.shape if not is_extension_array_dtype(values): # TODO: https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. values = values.reshape(tuple((1, ) + shape)) return values
<SYSTEM_TASK:> Return a new ndarray, try to preserve dtype if possible. <END_TASK> <USER_TASK:> Description: def _putmask_smart(v, m, n): """ Return a new ndarray, try to preserve dtype if possible. Parameters ---------- v : `values`, updated in-place (array like) m : `mask`, applies to both sides (array like) n : `new values` either scalar or an array like aligned with `values` Returns ------- values : ndarray with updated values this *may* be a copy of the original See Also -------- ndarray.putmask """
# we cannot use np.asarray() here as we cannot have conversions # that numpy does when numeric are mixed with strings # n should be the length of the mask or a scalar here if not is_list_like(n): n = np.repeat(n, len(m)) elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar n = np.repeat(np.array(n, ndmin=1), len(m)) # see if we are only masking values that if putted # will work in the current dtype try: nn = n[m] # make sure that we have a nullable type # if we have nulls if not _isna_compat(v, nn[0]): raise ValueError # we ignore ComplexWarning here with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", np.ComplexWarning) nn_at = nn.astype(v.dtype) # avoid invalid dtype comparisons # between numbers & strings # only compare integers/floats # don't compare integers to datetimelikes if (not is_numeric_v_string_like(nn, nn_at) and (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype) and is_float_dtype(nn_at.dtype) or is_integer_dtype(nn_at.dtype))): comp = (nn == nn_at) if is_list_like(comp) and comp.all(): nv = v.copy() nv[m] = nn_at return nv except (ValueError, IndexError, TypeError, OverflowError): pass n = np.asarray(n) def _putmask_preserve(nv, n): try: nv[m] = n[m] except (IndexError, ValueError): nv[m] = n return nv # preserves dtype if possible if v.dtype.kind == n.dtype.kind: return _putmask_preserve(v, n) # change the dtype if needed dtype, _ = maybe_promote(n.dtype) if is_extension_type(v.dtype) and is_object_dtype(dtype): v = v.get_values(dtype) else: v = v.astype(dtype) return _putmask_preserve(v, n)
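The dtype hazard this helper works around is easy to reproduce with raw numpy (toy arrays):

import numpy as np

v = np.array([1, 2, 3, 4])                    # int64 values
m = np.array([False, True, False, True])
n = np.array([10.7, 20.7, 30.7, 40.7])

# plain np.putmask casts into the existing dtype and truncates the floats
a = v.copy()
np.putmask(a, m, n)                           # a -> [ 1, 20,  3, 40]

# the "smart" path promotes the dtype first so nothing is lost
out = v.astype(np.float64)
np.putmask(out, m, n)                         # out -> [ 1. , 20.7,  3. , 40.7]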
<SYSTEM_TASK:> ndim inference and validation. <END_TASK> <USER_TASK:> Description: def _check_ndim(self, values, ndim): """ ndim inference and validation. Infers ndim from 'values' if not provided to __init__. Validates that values.ndim and ndim are consistent if and only if the class variable '_validate_ndim' is True. Parameters ---------- values : array-like ndim : int or None Returns ------- ndim : int Raises ------ ValueError : the number of dimensions do not match """
if ndim is None: ndim = values.ndim if self._validate_ndim and values.ndim != ndim: msg = ("Wrong number of dimensions. values.ndim != ndim " "[{} != {}]") raise ValueError(msg.format(values.ndim, ndim)) return ndim
<SYSTEM_TASK:>
validate that the dtype is castable to categorical
<END_TASK>
<USER_TASK:>
Description:
def is_categorical_astype(self, dtype):
    """
    validate that the dtype is castable to categorical;
    return a boolean indicating whether we are casting to a Categorical
    """
if dtype is Categorical or dtype is CategoricalDtype: # this is a pd.Categorical, but is not # a valid type for astypeing raise TypeError("invalid type {0} for astype".format(dtype)) elif is_categorical_dtype(dtype): return True return False
<SYSTEM_TASK:> return an internal format, currently just the ndarray <END_TASK> <USER_TASK:> Description: def get_values(self, dtype=None): """ return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations """
if is_object_dtype(dtype): return self.values.astype(object) return self.values
<SYSTEM_TASK:>
Create a new block, with type inference, propagating any unspecified values
<END_TASK>
<USER_TASK:>
Description:
def make_block(self, values, placement=None, ndim=None):
    """
    Create a new block, with type inference, propagating any unspecified
    values (placement, ndim) from self
    """
if placement is None: placement = self.mgr_locs if ndim is None: ndim = self.ndim return make_block(values, placement=placement, ndim=ndim)
<SYSTEM_TASK:> Wrap given values in a block of same type as self. <END_TASK> <USER_TASK:> Description: def make_block_same_class(self, values, placement=None, ndim=None, dtype=None): """ Wrap given values in a block of same type as self. """
if dtype is not None: # issue 19431 fastparquet is passing this warnings.warn("dtype argument is deprecated, will be removed " "in a future release.", DeprecationWarning) if placement is None: placement = self.mgr_locs return make_block(values, placement=placement, ndim=ndim, klass=self.__class__, dtype=dtype)
<SYSTEM_TASK:> apply the function to my values; return a block if we are not <END_TASK> <USER_TASK:> Description: def apply(self, func, **kwargs): """ apply the function to my values; return a block if we are not one """
with np.errstate(all='ignore'): result = func(self.values, **kwargs) if not isinstance(result, Block): result = self.make_block(values=_block_shape(result, ndim=self.ndim)) return result
<SYSTEM_TASK:> fillna on the block with the value. If we fail, then convert to <END_TASK> <USER_TASK:> Description: def fillna(self, value, limit=None, inplace=False, downcast=None): """ fillna on the block with the value. If we fail, then convert to ObjectBlock and try again """
inplace = validate_bool_kwarg(inplace, 'inplace') if not self._can_hold_na: if inplace: return self else: return self.copy() mask = isna(self.values) if limit is not None: if not is_integer(limit): raise ValueError('Limit must be an integer') if limit < 1: raise ValueError('Limit must be greater than 0') if self.ndim > 2: raise NotImplementedError("number of dimensions for 'fillna' " "is currently limited to 2") mask[mask.cumsum(self.ndim - 1) > limit] = False # fillna, but if we cannot coerce, then try again as an ObjectBlock try: values, _ = self._try_coerce_args(self.values, value) blocks = self.putmask(mask, value, inplace=inplace) blocks = [b.make_block(values=self._try_coerce_result(b.values)) for b in blocks] return self._maybe_downcast(blocks, downcast) except (TypeError, ValueError): # we can't process the value, but nothing to do if not mask.any(): return self if inplace else self.copy() # operate column-by-column def f(m, v, i): block = self.coerce_to_target_dtype(value) # slice out our block if i is not None: block = block.getitem_block(slice(i, i + 1)) return block.fillna(value, limit=limit, inplace=inplace, downcast=None) return self.split_and_operate(mask, f, inplace)
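The limit handling above is observable from Series.fillna; a toy example:

import pandas as pd
import numpy as np

s = pd.Series([np.nan, np.nan, np.nan, 4.0])

# with a scalar value, limit caps how many NaNs along the axis get filled
s.fillna(0.0, limit=2)        # [0.0, 0.0, NaN, 4.0]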
<SYSTEM_TASK:> split the block per-column, and apply the callable f <END_TASK> <USER_TASK:> Description: def split_and_operate(self, mask, f, inplace): """ split the block per-column, and apply the callable f per-column, return a new block for each. Handle masking which will not change a block unless needed. Parameters ---------- mask : 2-d boolean mask f : callable accepting (1d-mask, 1d values, indexer) inplace : boolean Returns ------- list of blocks """
if mask is None: mask = np.ones(self.shape, dtype=bool) new_values = self.values def make_a_block(nv, ref_loc): if isinstance(nv, Block): block = nv elif isinstance(nv, list): block = nv[0] else: # Put back the dimension that was taken from it and make # a block out of the result. try: nv = _block_shape(nv, ndim=self.ndim) except (AttributeError, NotImplementedError): pass block = self.make_block(values=nv, placement=ref_loc) return block # ndim == 1 if self.ndim == 1: if mask.any(): nv = f(mask, new_values, None) else: nv = new_values if inplace else new_values.copy() block = make_a_block(nv, self.mgr_locs) return [block] # ndim > 1 new_blocks = [] for i, ref_loc in enumerate(self.mgr_locs): m = mask[i] v = new_values[i] # need a new block if m.any(): nv = f(m, v, i) else: nv = v if inplace else v.copy() block = make_a_block(nv, [ref_loc]) new_blocks.append(block) return new_blocks
<SYSTEM_TASK:> try to downcast each item to the dict of dtypes if present <END_TASK> <USER_TASK:> Description: def downcast(self, dtypes=None): """ try to downcast each item to the dict of dtypes if present """
# turn it off completely if dtypes is False: return self values = self.values # single block handling if self._is_single_block: # try to cast all non-floats here if dtypes is None: dtypes = 'infer' nv = maybe_downcast_to_dtype(values, dtypes) return self.make_block(nv) # ndim > 1 if dtypes is None: return self if not (dtypes == 'infer' or isinstance(dtypes, dict)): raise ValueError("downcast must have a dictionary or 'infer' as " "its argument") # operate column-by-column # this is expensive as it splits the blocks items-by-item def f(m, v, i): if dtypes == 'infer': dtype = 'infer' else: raise AssertionError("dtypes as dict is not supported yet") if dtype is not None: v = maybe_downcast_to_dtype(v, dtype) return v return self.split_and_operate(None, f, False)
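The same infer-and-downcast idea is exposed publicly via pd.to_numeric; a small sketch:

import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])

# floats that round-trip losslessly are cast down to the smallest int dtype
pd.to_numeric(s, downcast='integer')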
<SYSTEM_TASK:> require the same dtype as ourselves <END_TASK> <USER_TASK:> Description: def _can_hold_element(self, element): """ require the same dtype as ourselves """
dtype = self.values.dtype.type tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, dtype) return isinstance(element, dtype)
<SYSTEM_TASK:>
try to cast the result back to our original type
<END_TASK>
<USER_TASK:>
Description:
def _try_cast_result(self, result, dtype=None):
    """ try to cast the result back to our original type; we may have
    round-tripped through object in the meantime
    """
if dtype is None: dtype = self.dtype if self.is_integer or self.is_bool or self.is_datetime: pass elif self.is_float and result.dtype == self.dtype: # protect against a bool/object showing up here if isinstance(dtype, str) and dtype == 'infer': return result if not isinstance(dtype, type): dtype = dtype.type if issubclass(dtype, (np.bool_, np.object_)): if issubclass(dtype, np.bool_): if isna(result).all(): return result.astype(np.bool_) else: result = result.astype(np.object_) result[result == 1] = True result[result == 0] = False return result else: return result.astype(np.object_) return result # may need to change the dtype here return maybe_downcast_to_dtype(result, dtype)
<SYSTEM_TASK:>
replace the to_replace value with value, possibly creating new blocks here
<END_TASK>
<USER_TASK:>
Description:
def replace(self, to_replace, value, inplace=False, filter=None,
            regex=False, convert=True):
    """replace the to_replace value with value, possibly creating new blocks
    here; this is just a call to putmask. regex is not used here; it is
    used in ObjectBlocks and is accepted here for API compatibility.
    """
inplace = validate_bool_kwarg(inplace, 'inplace') original_to_replace = to_replace # try to replace, if we raise an error, convert to ObjectBlock and # retry try: values, to_replace = self._try_coerce_args(self.values, to_replace) mask = missing.mask_missing(values, to_replace) if filter is not None: filtered_out = ~self.mgr_locs.isin(filter) mask[filtered_out.nonzero()[0]] = False blocks = self.putmask(mask, value, inplace=inplace) if convert: blocks = [b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks] return blocks except (TypeError, ValueError): # GH 22083, TypeError or ValueError occurred within error handling # causes infinite loop. Cast and retry only if not objectblock. if is_object_dtype(self): raise # try again with a compatible block block = self.astype(object) return block.replace(to_replace=original_to_replace, value=value, inplace=inplace, filter=filter, regex=regex, convert=convert)
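The object-block fallback in the except branch is what makes mixed-type replacements work; a toy example:

import pandas as pd

s = pd.Series([0, 1, 2, 0])

# same dtype: the int64 block handles it directly
s.replace(0, 99)

# an int64 block cannot hold a string, so the block is cast to object
# and the replace is retried, yielding an object-dtype result
s.replace(0, 'missing')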
<SYSTEM_TASK:>
Set the value inplace, returning a possibly different typed block.
<END_TASK>
<USER_TASK:>
Description:
def setitem(self, indexer, value):
    """Set the value inplace, returning a possibly different typed block.

    Parameters
    ----------
    indexer : tuple, list-like, array-like, slice
        The subset of self.values to set
    value : object
        The value being set

    Returns
    -------
    Block

    Notes
    -----
    `indexer` is a direct slice/positional indexer. `value` must
    be a compatible shape.
    """
# coerce None values, if appropriate if value is None: if self.is_numeric: value = np.nan # coerce if block dtype can store value values = self.values try: values, value = self._try_coerce_args(values, value) # can keep its own dtype if hasattr(value, 'dtype') and is_dtype_equal(values.dtype, value.dtype): dtype = self.dtype else: dtype = 'infer' except (TypeError, ValueError): # current dtype cannot store value, coerce to common dtype find_dtype = False if hasattr(value, 'dtype'): dtype = value.dtype find_dtype = True elif lib.is_scalar(value): if isna(value): # NaN promotion is handled in latter path dtype = False else: dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True) find_dtype = True else: dtype = 'infer' if find_dtype: dtype = find_common_type([values.dtype, dtype]) if not is_dtype_equal(self.dtype, dtype): b = self.astype(dtype) return b.setitem(indexer, value) # value must be storeable at this moment arr_value = np.array(value) # cast the values to a type that can hold nan (if necessary) if not self._can_hold_element(value): dtype, _ = maybe_promote(arr_value.dtype) values = values.astype(dtype) transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) values = transf(values) # length checking check_setitem_lengths(indexer, value, values) def _is_scalar_indexer(indexer): # return True if we are all scalar indexers if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False def _is_empty_indexer(indexer): # return a boolean if we have an empty indexer if is_list_like(indexer) and not len(indexer): return True if arr_value.ndim == 1: if not isinstance(indexer, tuple): indexer = tuple([indexer]) return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer) return False # empty indexers # 8669 (empty) if _is_empty_indexer(indexer): pass # setting a single element for each dim and with a rhs that could # be say a list # GH 6043 elif _is_scalar_indexer(indexer): values[indexer] = value # if we are an exact match (ex-broadcasting), # then use the resultant dtype elif (len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape)): values[indexer] = value try: values = values.astype(arr_value.dtype) except ValueError: pass # set else: values[indexer] = value # coerce and try to infer the dtypes of the result values = self._try_coerce_and_cast_result(values, dtype) block = self.make_block(transf(values)) return block
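The common-dtype promotion branch is what you see when assigning an incompatible scalar through the public indexers, at least with the 0.24-era behavior this code implements (toy example):

import pandas as pd

s = pd.Series([1, 2, 3])      # backed by an int64 block

# 1.5 cannot be held by the int block, so find_common_type picks float64,
# the block is astype'd, and setitem is retried on the new block
s.iloc[0] = 1.5
s.dtype                       # float64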
<SYSTEM_TASK:> putmask the data to the block; it is possible that we may create a <END_TASK> <USER_TASK:> Description: def putmask(self, mask, new, align=True, inplace=False, axis=0, transpose=False): """ putmask the data to the block; it is possible that we may create a new dtype of block return the resulting block(s) Parameters ---------- mask : the condition to respect new : a ndarray/object align : boolean, perform alignment on other/cond, default is True inplace : perform inplace modification, default is False axis : int transpose : boolean Set to True if self is stored with axes reversed Returns ------- a list of new blocks, the result of the putmask """
new_values = self.values if inplace else self.values.copy() new = getattr(new, 'values', new) mask = getattr(mask, 'values', mask) # if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: new = self.fill_value if self._can_hold_element(new): _, new = self._try_coerce_args(new_values, new) if transpose: new_values = new_values.T # If the default repeat behavior in np.putmask would go in the # wrong direction, then explicitly repeat and reshape new instead if getattr(new, 'ndim', 0) >= 1: if self.ndim - 1 == new.ndim and axis == 1: new = np.repeat( new, new_values.shape[-1]).reshape(self.shape) new = new.astype(new_values.dtype) # we require exact matches between the len of the # values we are setting (or is compat). np.putmask # doesn't check this and will simply truncate / pad # the output, but we want sane error messages # # TODO: this prob needs some better checking # for 2D cases if ((is_list_like(new) and np.any(mask[mask]) and getattr(new, 'ndim', 1) == 1)): if not (mask.shape[-1] == len(new) or mask[mask].shape[-1] == len(new) or len(new) == 1): raise ValueError("cannot assign mismatch " "length to masked array") np.putmask(new_values, mask, new) # maybe upcast me elif mask.any(): if transpose: mask = mask.T if isinstance(new, np.ndarray): new = new.T axis = new_values.ndim - axis - 1 # Pseudo-broadcast if getattr(new, 'ndim', 0) >= 1: if self.ndim - 1 == new.ndim: new_shape = list(new.shape) new_shape.insert(axis, 1) new = new.reshape(tuple(new_shape)) # operate column-by-column def f(m, v, i): if i is None: # ndim==1 case. n = new else: if isinstance(new, np.ndarray): n = np.squeeze(new[i % new.shape[0]]) else: n = np.array(new) # type of the new block dtype, _ = maybe_promote(n.dtype) # we need to explicitly astype here to make a copy n = n.astype(dtype) nv = _putmask_smart(v, m, n) return nv new_blocks = self.split_and_operate(mask, f, inplace) return new_blocks if inplace: return [self] if transpose: new_values = new_values.T return [self.make_block(new_values)]
<SYSTEM_TASK:>
coerce the current block to a dtype compatible with other
<END_TASK>
<USER_TASK:>
Description:
def coerce_to_target_dtype(self, other):
    """
    coerce the current block to a dtype compatible with other; we will
    return a block, possibly object, and will not raise.

    we can also safely try to coerce to the same dtype
    and will receive the same block
    """
# if we cannot then coerce to object dtype, _ = infer_dtype_from(other, pandas_dtype=True) if is_dtype_equal(self.dtype, dtype): return self if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype): # we don't upcast to bool return self.astype(object) elif ((self.is_float or self.is_complex) and (is_integer_dtype(dtype) or is_float_dtype(dtype))): # don't coerce float/complex to int return self elif (self.is_datetime or is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)): # not a datetime if not ((is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)) and self.is_datetime): return self.astype(object) # don't upcast timezone with different timezone or no timezone mytz = getattr(self.dtype, 'tz', None) othertz = getattr(dtype, 'tz', None) if str(mytz) != str(othertz): return self.astype(object) raise AssertionError("possible recursion in " "coerce_to_target_dtype: {} {}".format( self, other)) elif (self.is_timedelta or is_timedelta64_dtype(dtype)): # not a timedelta if not (is_timedelta64_dtype(dtype) and self.is_timedelta): return self.astype(object) raise AssertionError("possible recursion in " "coerce_to_target_dtype: {} {}".format( self, other)) try: return self.astype(dtype) except (ValueError, TypeError, OverflowError): pass return self.astype(object)
<SYSTEM_TASK:> fillna but using the interpolate machinery <END_TASK> <USER_TASK:> Description: def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, coerce=False, downcast=None): """ fillna but using the interpolate machinery """
inplace = validate_bool_kwarg(inplace, 'inplace') # if we are coercing, then don't force the conversion # if the block can't hold the type if coerce: if not self._can_hold_na: if inplace: return [self] else: return [self.copy()] values = self.values if inplace else self.values.copy() values, fill_value = self._try_coerce_args(values, fill_value) values = missing.interpolate_2d(values, method=method, axis=axis, limit=limit, fill_value=fill_value, dtype=self.dtype) values = self._try_coerce_result(values) blocks = [self.make_block_same_class(values, ndim=self.ndim)] return self._maybe_downcast(blocks, downcast)
<SYSTEM_TASK:> interpolate using scipy wrappers <END_TASK> <USER_TASK:> Description: def _interpolate(self, method=None, index=None, values=None, fill_value=None, axis=0, limit=None, limit_direction='forward', limit_area=None, inplace=False, downcast=None, **kwargs): """ interpolate using scipy wrappers """
inplace = validate_bool_kwarg(inplace, 'inplace') data = self.values if inplace else self.values.copy() # only deal with floats if not self.is_float: if not self.is_integer: return self data = data.astype(np.float64) if fill_value is None: fill_value = self.fill_value if method in ('krogh', 'piecewise_polynomial', 'pchip'): if not index.is_monotonic: raise ValueError("{0} interpolation requires that the " "index be monotonic.".format(method)) # process 1-d slices in the axis direction def func(x): # process a 1-d slice, returning it # should the axis argument be handled below in apply_along_axis? # i.e. not an arg to missing.interpolate_1d return missing.interpolate_1d(index, x, method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area, fill_value=fill_value, bounds_error=False, **kwargs) # interp each column independently interp_values = np.apply_along_axis(func, axis, data) blocks = [self.make_block_same_class(interp_values)] return self._maybe_downcast(blocks, downcast)
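The public entry point for this machinery is Series/DataFrame.interpolate; a small sketch (the scipy-backed methods such as 'pchip' additionally require scipy):

import pandas as pd
import numpy as np

s = pd.Series([0.0, np.nan, np.nan, 3.0], index=[0, 1, 2, 10])

# 'linear' treats the values as equally spaced ...
s.interpolate(method='linear')

# ... while 'index' (and the scipy wrappers above) interpolate against
# the index values themselves
s.interpolate(method='index')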