def _bool_method_SERIES(cls, op, special):
    """
    Wrapper function for Series logical operations, to avoid
    code duplication.
    """
    op_name = _get_op_name(op, special)

    def na_op(x, y):
        try:
            result = op(x, y)
        except TypeError:
            assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
            if isinstance(y, np.ndarray):
                # bool-bool dtype operations should be OK, should not get here
                assert not (is_bool_dtype(x) and is_bool_dtype(y))
                x = ensure_object(x)
                y = ensure_object(y)
                result = libops.vec_binop(x, y, op)
            else:
                # let null fall thru
                assert lib.is_scalar(y)
                if not isna(y):
                    y = bool(y)
                try:
                    result = libops.scalar_binop(x, y, op)
                except (TypeError, ValueError, AttributeError,
                        OverflowError, NotImplementedError):
                    raise TypeError("cannot compare a dtyped [{dtype}] array "
                                    "with a scalar of type [{typ}]"
                                    .format(dtype=x.dtype,
                                            typ=type(y).__name__))
        return result

    fill_int = lambda x: x.fillna(0)
    fill_bool = lambda x: x.fillna(False).astype(bool)

    def wrapper(self, other):
        is_self_int_dtype = is_integer_dtype(self.dtype)

        self, other = _align_method_SERIES(self, other, align_asobject=True)
        res_name = get_op_result_name(self, other)

        if isinstance(other, ABCDataFrame):
            # Defer to DataFrame implementation; fail early
            return NotImplemented

        elif isinstance(other, (ABCSeries, ABCIndexClass)):
            is_other_int_dtype = is_integer_dtype(other.dtype)
            other = fill_int(other) if is_other_int_dtype else fill_bool(other)
            ovalues = other.values
            finalizer = lambda x: x

        else:
            # scalars, list, tuple, np.array
            is_other_int_dtype = is_integer_dtype(np.asarray(other))
            if is_list_like(other) and not isinstance(other, np.ndarray):
                # TODO: Can we do this before the is_integer_dtype check?
                # could the is_integer_dtype check be checking the wrong
                # thing? e.g. other = [[0, 1], [2, 3], [4, 5]]?
                other = construct_1d_object_array_from_listlike(other)
            ovalues = other
            finalizer = lambda x: x.__finalize__(self)

        # For int vs int `^`, `|`, `&` are bitwise operators and return
        # integer dtypes. Otherwise these are boolean ops
        filler = (fill_int if is_self_int_dtype and is_other_int_dtype
                  else fill_bool)
        res_values = na_op(self.values, ovalues)
        unfilled = self._constructor(res_values,
                                     index=self.index, name=res_name)
        filled = filler(unfilled)
        return finalizer(filled)

    wrapper.__name__ = op_name
    return wrapper
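The fill_int/fill_bool split above has a user-visible consequence: `|`, `&`, `^` between two integer Series stay bitwise and keep integer dtype, while any boolean operand forces a boolean result. A minimal sketch of that behavior using only the public API, as it works in the pandas version this code is from:

import pandas as pd

a = pd.Series([1, 2, 3])
b = pd.Series([4, 1, 0])
print(a | b)               # bitwise int result: 5, 3, 3 (dtype int64)
print((a > 1) | (b > 1))   # boolean result: True, True, True (dtype bool)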
def _combine_series_frame(self, other, func, fill_value=None, axis=None,
                          level=None):
    """
    Apply binary operator `func` to self, other using alignment and fill
    conventions determined by the fill_value, axis, and level kwargs.

    Parameters
    ----------
    self : DataFrame
    other : Series
    func : binary operator
    fill_value : object, default None
    axis : {0, 1, 'columns', 'index', None}, default None
    level : int or None, default None

    Returns
    -------
    result : DataFrame
    """
    if fill_value is not None:
        raise NotImplementedError("fill_value {fill} not supported."
                                  .format(fill=fill_value))

    if axis is not None:
        axis = self._get_axis_number(axis)
        if axis == 0:
            return self._combine_match_index(other, func, level=level)
        else:
            return self._combine_match_columns(other, func, level=level)
    else:
        if not len(other):
            return self * np.nan

        if not len(self):
            # Ambiguous case, use _series so works with DataFrame
            return self._constructor(data=self._series, index=self.index,
                                     columns=self.columns)

        # default axis is columns
        return self._combine_match_columns(other, func, level=level)
def _align_method_FRAME(left, right, axis):
    """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """

    def to_series(right):
        msg = ('Unable to coerce to Series, length must be {req_len}: '
               'given {given_len}')
        if axis is not None and left._get_axis_name(axis) == 'index':
            if len(left.index) != len(right):
                raise ValueError(msg.format(req_len=len(left.index),
                                            given_len=len(right)))
            right = left._constructor_sliced(right, index=left.index)
        else:
            if len(left.columns) != len(right):
                raise ValueError(msg.format(req_len=len(left.columns),
                                            given_len=len(right)))
            right = left._constructor_sliced(right, index=left.columns)
        return right

    if isinstance(right, np.ndarray):

        if right.ndim == 1:
            right = to_series(right)

        elif right.ndim == 2:
            if right.shape == left.shape:
                right = left._constructor(right, index=left.index,
                                          columns=left.columns)

            elif right.shape[0] == left.shape[0] and right.shape[1] == 1:
                # Broadcast across columns
                right = np.broadcast_to(right, left.shape)
                right = left._constructor(right,
                                          index=left.index,
                                          columns=left.columns)

            elif right.shape[1] == left.shape[1] and right.shape[0] == 1:
                # Broadcast along rows
                right = to_series(right[0, :])

            else:
                raise ValueError("Unable to coerce to DataFrame, shape "
                                 "must be {req_shape}: given {given_shape}"
                                 .format(req_shape=left.shape,
                                         given_shape=right.shape))

        elif right.ndim > 2:
            raise ValueError('Unable to coerce to Series/DataFrame, dim '
                             'must be <= 2: {dim}'.format(dim=right.shape))

    elif (is_list_like(right) and
          not isinstance(right, (ABCSeries, ABCDataFrame))):
        # GH17901
        right = to_series(right)

    return right
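The 2-D branches implement NumPy-style broadcasting for DataFrame arithmetic. A short public-API illustration of the (N, 1) and (1, M) cases handled above:

import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6).reshape(3, 2), columns=['a', 'b'])
print(df + np.array([[10], [20], [30]]))  # (3, 1): broadcast across columns
print(df + np.array([[100, 200]]))        # (1, 2): broadcast along rows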
def _cast_sparse_series_op(left, right, opname):
    """
    For SparseSeries operation, coerce to float64 if the result is expected
    to have NaN or inf values

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    opname : str

    Returns
    -------
    left : SparseArray
    right : SparseArray
    """
    from pandas.core.sparse.api import SparseDtype

    opname = opname.strip('_')

    # TODO: This should be moved to the array?
    if is_integer_dtype(left) and is_integer_dtype(right):
        # series coerces to float64 if result should have NaN/inf
        if opname in ('floordiv', 'mod') and (right.values == 0).any():
            left = left.astype(SparseDtype(np.float64, left.fill_value))
            right = right.astype(SparseDtype(np.float64, right.fill_value))
        elif opname in ('rfloordiv', 'rmod') and (left.values == 0).any():
            left = left.astype(SparseDtype(np.float64, left.fill_value))
            right = right.astype(SparseDtype(np.float64, right.fill_value))

    return left, right
def _arith_method_SPARSE_SERIES(cls, op, special):
    """
    Wrapper function for Series arithmetic operations, to avoid
    code duplication.
    """
    op_name = _get_op_name(op, special)

    def wrapper(self, other):
        if isinstance(other, ABCDataFrame):
            return NotImplemented
        elif isinstance(other, ABCSeries):
            if not isinstance(other, ABCSparseSeries):
                other = other.to_sparse(fill_value=self.fill_value)
            return _sparse_series_op(self, other, op, op_name)
        elif is_scalar(other):
            with np.errstate(all='ignore'):
                new_values = op(self.values, other)
            return self._constructor(new_values,
                                     index=self.index,
                                     name=self.name)
        else:  # pragma: no cover
            raise TypeError('operation with {other} not supported'
                            .format(other=type(other)))

    wrapper.__name__ = op_name
    return wrapper
def _arith_method_SPARSE_ARRAY(cls, op, special):
    """
    Wrapper function for SparseArray arithmetic operations, to avoid
    code duplication.
    """
    op_name = _get_op_name(op, special)

    def wrapper(self, other):
        from pandas.core.arrays.sparse.array import (
            SparseArray, _sparse_array_op, _wrap_result, _get_fill)
        if isinstance(other, np.ndarray):
            if len(self) != len(other):
                raise AssertionError("length mismatch: {self} vs. {other}"
                                     .format(self=len(self),
                                             other=len(other)))
            if not isinstance(other, SparseArray):
                dtype = getattr(other, 'dtype', None)
                other = SparseArray(other, fill_value=self.fill_value,
                                    dtype=dtype)
            return _sparse_array_op(self, other, op, op_name)
        elif is_scalar(other):
            with np.errstate(all='ignore'):
                fill = op(_get_fill(self), np.asarray(other))
                result = op(self.sp_values, other)

            return _wrap_result(op_name, result, self.sp_index, fill)
        else:  # pragma: no cover
            raise TypeError('operation with {other} not supported'
                            .format(other=type(other)))

    wrapper.__name__ = op_name
    return wrapper
def validate_periods(periods):
    """
    If a `periods` argument is passed to the Datetime/Timedelta Array/Index
    constructor, cast it to an integer.

    Parameters
    ----------
    periods : None, float, int

    Returns
    -------
    periods : None or int

    Raises
    ------
    TypeError
        If `periods` is not None and is neither a float nor an integer.
    """
    if periods is not None:
        if lib.is_float(periods):
            periods = int(periods)
        elif not lib.is_integer(periods):
            raise TypeError('periods must be a number, got {periods}'
                            .format(periods=periods))
    return periods
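This validator sits on the `date_range`/`timedelta_range` construction path, so its behavior is observable publicly: a float `periods` is truncated to int, anything non-numeric raises. A quick demonstration:

import pandas as pd

print(pd.date_range('2020-01-01', periods=3.0))   # float is cast to 3
try:
    pd.date_range('2020-01-01', periods='three')
except TypeError as err:
    print(err)  # periods must be a number, got three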
def validate_endpoints(closed):
    """
    Check that the `closed` argument is among [None, "left", "right"]

    Parameters
    ----------
    closed : {None, "left", "right"}

    Returns
    -------
    left_closed : bool
    right_closed : bool

    Raises
    ------
    ValueError : if argument is not among valid values
    """
    left_closed = False
    right_closed = False

    if closed is None:
        left_closed = True
        right_closed = True
    elif closed == "left":
        left_closed = True
    elif closed == "right":
        right_closed = True
    else:
        raise ValueError("Closed has to be either 'left', 'right' or None")

    return left_closed, right_closed
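The returned flags control endpoint inclusion in range generation; through the public API of this pandas era that is the `closed` keyword on `date_range`:

import pandas as pd

full = pd.date_range('2020-01-01', '2020-01-04')                    # both ends
left = pd.date_range('2020-01-01', '2020-01-04', closed='left')    # drops end
right = pd.date_range('2020-01-01', '2020-01-04', closed='right')  # drops start
print(len(full), len(left), len(right))  # 4 3 3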
def validate_inferred_freq(freq, inferred_freq, freq_infer):
    """
    If the user passes a freq and another freq is inferred from passed data,
    require that they match.

    Parameters
    ----------
    freq : DateOffset or None
    inferred_freq : DateOffset or None
    freq_infer : bool

    Returns
    -------
    freq : DateOffset or None
    freq_infer : bool

    Notes
    -----
    We assume at this point that `maybe_infer_freq` has been called, so `freq`
    is either a DateOffset object or None.
    """
    if inferred_freq is not None:
        if freq is not None and freq != inferred_freq:
            raise ValueError('Inferred frequency {inferred} from passed '
                             'values does not conform to passed frequency '
                             '{passed}'
                             .format(inferred=inferred_freq,
                                     passed=freq.freqstr))
        elif freq is None:
            freq = inferred_freq
        freq_infer = False

    return freq, freq_infer
def maybe_infer_freq(freq):
    """
    Comparing a DateOffset to the string "infer" raises, so we need to be
    careful about comparisons.  Make a dummy variable `freq_infer` to
    signify the case where the given freq is "infer" and set freq to None
    to avoid comparison trouble later on.

    Parameters
    ----------
    freq : {DateOffset, None, str}

    Returns
    -------
    freq : {DateOffset, None}
    freq_infer : bool
    """
    freq_infer = False
    if not isinstance(freq, DateOffset):
        # if a passed freq is None, don't infer automatically
        if freq != 'infer':
            freq = frequencies.to_offset(freq)
        else:
            freq_infer = True
            freq = None
    return freq, freq_infer
def _ensure_datetimelike_to_i8(other, to_utc=False):
    """
    Helper for coercing an input scalar or array to i8.

    Parameters
    ----------
    other : 1d array
    to_utc : bool, default False
        If True, convert the values to UTC before extracting the i8 values
        If False, extract the i8 values directly.

    Returns
    -------
    i8 1d array
    """
    from pandas import Index
    from pandas.core.arrays import PeriodArray

    if lib.is_scalar(other) and isna(other):
        return iNaT
    elif isinstance(other, (PeriodArray, ABCIndexClass,
                            DatetimeLikeArrayMixin)):
        # convert tz if needed
        if getattr(other, 'tz', None) is not None:
            if to_utc:
                other = other.tz_convert('UTC')
            else:
                other = other.tz_localize(None)
    else:
        try:
            return np.array(other, copy=False).view('i8')
        except TypeError:
            # period array cannot be coerced to int
            other = Index(other)
    return other.asi8
def _scalar_from_string(
        self,
        value: str,
) -> Union[Period, Timestamp, Timedelta, NaTType]:
    """
    Construct a scalar type from a string.

    Parameters
    ----------
    value : str

    Returns
    -------
    Period, Timestamp, or Timedelta, or NaT
        Whatever the type of ``self._scalar_type`` is.

    Notes
    -----
    This should call ``self._check_compatible_with`` before
    unboxing the result.
    """
    raise AbstractMethodError(self)
def _unbox_scalar(
        self,
        value: Union[Period, Timestamp, Timedelta, NaTType],
) -> int:
    """
    Unbox the integer value of a scalar `value`.

    Parameters
    ----------
    value : Union[Period, Timestamp, Timedelta]

    Returns
    -------
    int

    Examples
    --------
    >>> self._unbox_scalar(Timedelta('10s'))  # DOCTEST: +SKIP
    10000000000
    """
    raise AbstractMethodError(self)
def _check_compatible_with(
        self,
        other: Union[Period, Timestamp, Timedelta, NaTType],
) -> None:
    """
    Verify that `self` and `other` are compatible.

    * DatetimeArray verifies that the timezones (if any) match
    * PeriodArray verifies that the freq matches
    * Timedelta has no verification

    In each case, NaT is considered compatible.

    Parameters
    ----------
    other

    Raises
    ------
    Exception
    """
    raise AbstractMethodError(self)
def strftime(self, date_format):
    """
    Convert to Index using specified date_format.

    Return an Index of formatted strings specified by date_format, which
    supports the same string format as the python standard library. Details
    of the string format can be found in `python string format
    doc <%(URL)s>`__.

    Parameters
    ----------
    date_format : str
        Date format string (e.g. "%%Y-%%m-%%d").

    Returns
    -------
    Index
        Index of formatted strings.

    See Also
    --------
    to_datetime : Convert the given argument to datetime.
    DatetimeIndex.normalize : Return DatetimeIndex with times to midnight.
    DatetimeIndex.round : Round the DatetimeIndex to the specified freq.
    DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq.

    Examples
    --------
    >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"),
    ...                     periods=3, freq='s')
    >>> rng.strftime('%%B %%d, %%Y, %%r')
    Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM',
           'March 10, 2018, 09:00:02 AM'],
          dtype='object')
    """
    from pandas import Index
    return Index(self._format_native_types(date_format=date_format))
def searchsorted(self, value, side='left', sorter=None):
    """
    Find indices where elements should be inserted to maintain order.

    Find the indices into a sorted array `self` such that, if the
    corresponding elements in `value` were inserted before the indices,
    the order of `self` would be preserved.

    Parameters
    ----------
    value : array_like
        Values to insert into `self`.
    side : {'left', 'right'}, optional
        If 'left', the index of the first suitable location found is given.
        If 'right', return the last such index.  If there is no suitable
        index, return either 0 or N (where N is the length of `self`).
    sorter : 1-D array_like, optional
        Optional array of integer indices that sort `self` into ascending
        order. They are typically the result of ``np.argsort``.

    Returns
    -------
    indices : array of ints
        Array of insertion points with the same shape as `value`.
    """
    if isinstance(value, str):
        value = self._scalar_from_string(value)

    if not (isinstance(value, (self._scalar_type, type(self)))
            or isna(value)):
        raise ValueError("Unexpected type for 'value': {valtype}"
                         .format(valtype=type(value)))

    self._check_compatible_with(value)
    if isinstance(value, type(self)):
        value = value.asi8
    else:
        value = self._unbox_scalar(value)

    return self.asi8.searchsorted(value, side=side, sorter=sorter)
def repeat(self, repeats, *args, **kwargs):
    """
    Repeat elements of an array.

    See Also
    --------
    numpy.ndarray.repeat
    """
    nv.validate_repeat(args, kwargs)
    values = self._data.repeat(repeats)
    return type(self)(values.view('i8'), dtype=self.dtype)
def value_counts(self, dropna=False):
    """
    Return a Series containing counts of unique values.

    Parameters
    ----------
    dropna : boolean, default False
        Don't include counts of NaT values.

    Returns
    -------
    Series
    """
    from pandas import Series, Index

    if dropna:
        values = self[~self.isna()]._data
    else:
        values = self._data

    cls = type(self)

    result = value_counts(values, sort=False, dropna=dropna)
    index = Index(cls(result.index.view('i8'), dtype=self.dtype),
                  name=result.index.name)
    return Series(result.values, index=index, name=result.name)
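Publicly this surfaces through `Series.value_counts` on datetime-like data (note the public method defaults to dropna=True, unlike the internal above); NaT handling follows the flag:

import pandas as pd

s = pd.Series(pd.to_datetime(['2020-01-01', '2020-01-01', None]))
print(s.value_counts())              # NaT excluded
print(s.value_counts(dropna=False))  # NaT counted once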
def _maybe_mask_results(self, result, fill_value=iNaT, convert=None):
    """
    Parameters
    ----------
    result : a ndarray
    fill_value : object, default iNaT
    convert : string/dtype or None

    Returns
    -------
    result : ndarray with values replaced by the fill_value

    mask the result if needed, convert to the provided dtype if it's not
    None

    This is an internal routine.
    """
    if self._hasnans:
        if convert:
            result = result.astype(convert)
        if fill_value is None:
            fill_value = np.nan
        result[self._isnan] = fill_value
    return result
def _validate_frequency(cls, index, freq, **kwargs):
    """
    Validate that a frequency is compatible with the values of a given
    Datetime Array/Index or Timedelta Array/Index

    Parameters
    ----------
    index : DatetimeIndex or TimedeltaIndex
        The index on which to determine if the given frequency is valid
    freq : DateOffset
        The frequency to validate
    """
    if is_period_dtype(cls):
        # Frequency validation is not meaningful for Period Array/Index
        return None

    inferred = index.inferred_freq
    if index.size == 0 or inferred == freq.freqstr:
        return None

    try:
        on_freq = cls._generate_range(start=index[0], end=None,
                                      periods=len(index), freq=freq,
                                      **kwargs)
        if not np.array_equal(index.asi8, on_freq.asi8):
            raise ValueError
    except ValueError as e:
        if "non-fixed" in str(e):
            # non-fixed frequencies are not meaningful for timedelta64;
            #  we retain that error message
            raise e
        # GH#11587 the main way this is reached is if the `np.array_equal`
        #  check above is False.  This can also be reached if index[0]
        #  is `NaT`, in which case the call to `cls._generate_range` will
        #  raise a ValueError, which we re-raise with a more targeted
        #  message.
        raise ValueError('Inferred frequency {infer} from passed values '
                         'does not conform to passed frequency {passed}'
                         .format(infer=inferred, passed=freq.freqstr))
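The user-visible effect: constructing a DatetimeIndex with a `freq` that contradicts the data raises the targeted message above.

import pandas as pd

try:
    pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-04'], freq='D')
except ValueError as err:
    # Inferred frequency None from passed values does not conform to
    # passed frequency D
    print(err)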
def _add_delta(self, other):
    """
    Add a timedelta-like, Tick or TimedeltaIndex-like object
    to self, yielding an int64 numpy array

    Parameters
    ----------
    other : {timedelta, np.timedelta64, Tick,
             TimedeltaIndex, ndarray[timedelta64]}

    Returns
    -------
    result : ndarray[int64]

    Notes
    -----
    The result's name is set outside of _add_delta by the calling
    method (__add__ or __sub__), if necessary (i.e. for Indexes).
    """
    if isinstance(other, (Tick, timedelta, np.timedelta64)):
        new_values = self._add_timedeltalike_scalar(other)
    elif is_timedelta64_dtype(other):
        # ndarray[timedelta64] or TimedeltaArray/Index
        new_values = self._add_delta_tdi(other)

    return new_values
def _add_timedeltalike_scalar(self, other):
    """
    Add a delta of a timedeltalike; return the i8 result view.
    """
    if isna(other):
        # i.e. np.timedelta64("NaT"), not recognized by delta_to_nanoseconds
        new_values = np.empty(len(self), dtype='i8')
        new_values[:] = iNaT
        return new_values

    inc = delta_to_nanoseconds(other)
    new_values = checked_add_with_arr(self.asi8, inc,
                                      arr_mask=self._isnan).view('i8')
    new_values = self._maybe_mask_results(new_values)
    return new_values.view('i8')
def _add_delta_tdi(self, other):
    """
    Add a delta of a TimedeltaIndex; return the i8 result view.
    """
    if len(self) != len(other):
        raise ValueError("cannot add indices of unequal length")

    if isinstance(other, np.ndarray):
        # ndarray[timedelta64]; wrap in TimedeltaIndex for op
        from pandas import TimedeltaIndex
        other = TimedeltaIndex(other)

    self_i8 = self.asi8
    other_i8 = other.asi8
    new_values = checked_add_with_arr(self_i8, other_i8,
                                      arr_mask=self._isnan,
                                      b_mask=other._isnan)
    if self._hasnans or other._hasnans:
        mask = (self._isnan) | (other._isnan)
        new_values[mask] = iNaT
    return new_values.view('i8')
def _add_nat(self):
    """
    Add pd.NaT to self
    """
    if is_period_dtype(self):
        raise TypeError('Cannot add {cls} and {typ}'
                        .format(cls=type(self).__name__,
                                typ=type(NaT).__name__))

    # GH#19124 pd.NaT is treated like a timedelta for both timedelta
    # and datetime dtypes
    result = np.zeros(len(self), dtype=np.int64)
    result.fill(iNaT)
    return type(self)(result, dtype=self.dtype, freq=None)
def _sub_nat(self):
    """
    Subtract pd.NaT from self
    """
    # GH#19124 Timedelta - datetime is not in general well-defined.
    # We make an exception for pd.NaT, which in this case quacks
    #  like a timedelta.
    # For datetime64 dtypes by convention we treat NaT as a datetime, so
    #  this subtraction returns a timedelta64 dtype.
    # For period dtype, timedelta64 is a close-enough return dtype.
    result = np.zeros(len(self), dtype=np.int64)
    result.fill(iNaT)
    return result.view('timedelta64[ns]')
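The convention is observable from the public API: subtracting NaT from a datetime-like Series yields an all-NaT timedelta64 result.

import pandas as pd

s = pd.Series(pd.to_datetime(['2020-01-01', '2020-06-01']))
print(s - pd.NaT)  # NaT, NaT with dtype timedelta64[ns]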
def _sub_period_array(self, other):
    """
    Subtract a Period Array/Index from self.  This is only valid if self
    is itself a Period Array/Index, raises otherwise.  Both objects must
    have the same frequency.

    Parameters
    ----------
    other : PeriodIndex or PeriodArray

    Returns
    -------
    result : np.ndarray[object]
        Array of DateOffset objects; nulls represented by NaT.
    """
    if not is_period_dtype(self):
        raise TypeError("cannot subtract {dtype}-dtype from {cls}"
                        .format(dtype=other.dtype,
                                cls=type(self).__name__))

    if len(self) != len(other):
        raise ValueError("cannot subtract arrays/indices of "
                         "unequal length")
    if self.freq != other.freq:
        msg = DIFFERENT_FREQ.format(cls=type(self).__name__,
                                    own_freq=self.freqstr,
                                    other_freq=other.freqstr)
        raise IncompatibleFrequency(msg)

    new_values = checked_add_with_arr(self.asi8, -other.asi8,
                                      arr_mask=self._isnan,
                                      b_mask=other._isnan)

    new_values = np.array([self.freq.base * x for x in new_values])
    if self._hasnans or other._hasnans:
        mask = (self._isnan) | (other._isnan)
        new_values[mask] = NaT
    return new_values
def _addsub_int_array(self, other, op):
    """
    Add or subtract array-like of integers equivalent to applying
    `_time_shift` pointwise.

    Parameters
    ----------
    other : Index, ExtensionArray, np.ndarray
        integer-dtype
    op : {operator.add, operator.sub}

    Returns
    -------
    result : same class as self
    """
    # _addsub_int_array is overridden by PeriodArray
    assert not is_period_dtype(self)
    assert op in [operator.add, operator.sub]

    if self.freq is None:
        # GH#19123
        raise NullFrequencyError("Cannot shift with no freq")

    elif isinstance(self.freq, Tick):
        # easy case where we can convert to timedelta64 operation
        td = Timedelta(self.freq)
        return op(self, td * other)

    # We should only get here with DatetimeIndex; dispatch
    # to _addsub_offset_array
    assert not is_timedelta64_dtype(self)
    return op(self, np.array(other) * self.freq)
def _addsub_offset_array(self, other, op):
    """
    Add or subtract array-like of DateOffset objects

    Parameters
    ----------
    other : Index, np.ndarray
        object-dtype containing pd.DateOffset objects
    op : {operator.add, operator.sub}

    Returns
    -------
    result : same class as self
    """
    assert op in [operator.add, operator.sub]
    if len(other) == 1:
        return op(self, other[0])

    warnings.warn("Adding/subtracting array of DateOffsets to "
                  "{cls} not vectorized"
                  .format(cls=type(self).__name__), PerformanceWarning)

    # For EA self.astype('O') returns a numpy array, not an Index
    left = lib.values_from_object(self.astype('O'))

    res_values = op(left, np.array(other))
    kwargs = {}
    if not is_period_dtype(self):
        kwargs['freq'] = 'infer'
    return self._from_sequence(res_values, **kwargs)
def _time_shift(self, periods, freq=None):
    """
    Shift each value by `periods`.

    Note this is different from ExtensionArray.shift, which
    shifts the *position* of each element, padding the end with
    missing values.

    Parameters
    ----------
    periods : int
        Number of periods to shift by.
    freq : pandas.DateOffset, pandas.Timedelta, or string
        Frequency increment to shift by.
    """
    if freq is not None and freq != self.freq:
        if isinstance(freq, str):
            freq = frequencies.to_offset(freq)
        offset = periods * freq
        result = self + offset
        return result

    if periods == 0:
        # immutable so OK
        return self.copy()

    if self.freq is None:
        raise NullFrequencyError("Cannot shift with no freq")

    start = self[0] + periods * self.freq
    end = self[-1] + periods * self.freq

    # Note: in the DatetimeTZ case, _generate_range will infer the
    #  appropriate timezone from `start` and `end`, so tz does not need
    #  to be passed explicitly.
    return self._generate_range(start=start, end=end, periods=None,
                                freq=self.freq)
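Publicly this backs `DatetimeIndex.shift`, which moves each timestamp by whole frequency steps rather than repositioning elements:

import pandas as pd

idx = pd.date_range('2020-01-01', periods=3, freq='D')
print(idx.shift(2))            # each value moves forward two days
print(idx.shift(2, freq='H'))  # explicit freq: forward two hours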
def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise',
                      from_utc=False):
    """
    Ensure that we are re-localized.

    This is for compat as we can then call this on all datetimelike
    arrays generally (ignored for Period/Timedelta)

    Parameters
    ----------
    arg : Union[DatetimeLikeArray, DatetimeIndexOpsMixin, ndarray]
    ambiguous : str, bool, or bool-ndarray, default 'raise'
    nonexistent : str, default 'raise'
    from_utc : bool, default False
        If True, localize the i8 ndarray to UTC first before converting to
        the appropriate tz. If False, localize directly to the tz.

    Returns
    -------
    localized array
    """
    # reconvert to local tz
    tz = getattr(self, 'tz', None)
    if tz is not None:
        if not isinstance(arg, type(self)):
            arg = self._simple_new(arg)
        if from_utc:
            arg = arg.tz_localize('UTC').tz_convert(self.tz)
        else:
            arg = arg.tz_localize(
                self.tz, ambiguous=ambiguous, nonexistent=nonexistent
            )
    return arg
def min(self, axis=None, skipna=True, *args, **kwargs):
    """
    Return the minimum value of the Array or minimum along
    an axis.

    See Also
    --------
    numpy.ndarray.min
    Index.min : Return the minimum value in an Index.
    Series.min : Return the minimum value in a Series.
    """
    nv.validate_min(args, kwargs)
    nv.validate_minmax_axis(axis)

    result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())
    if isna(result):
        # Period._from_ordinal does not handle np.nan gracefully
        return NaT
    return self._box_func(result)
def max(self, axis=None, skipna=True, *args, **kwargs):
    """
    Return the maximum value of the Array or maximum along
    an axis.

    See Also
    --------
    numpy.ndarray.max
    Index.max : Return the maximum value in an Index.
    Series.max : Return the maximum value in a Series.
    """
    # TODO: skipna is broken with max.
    # See https://github.com/pandas-dev/pandas/issues/24265
    nv.validate_max(args, kwargs)
    nv.validate_minmax_axis(axis)

    mask = self.isna()
    if skipna:
        values = self[~mask].asi8
    elif mask.any():
        return NaT
    else:
        values = self.asi8

    if not len(values):
        # short-circuit for empty max / min
        return NaT

    result = nanops.nanmax(values, skipna=skipna)
    # Don't have to worry about NA `result`, since no NA went in.
    return self._box_func(result)
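The NaT handling is visible from the public reductions: with `skipna=True` (the default) NaT values are dropped, with `skipna=False` they poison the result.

import pandas as pd

idx = pd.DatetimeIndex(['2020-01-01', pd.NaT, '2020-03-01'])
print(idx.max())              # 2020-03-01
print(idx.max(skipna=False))  # NaT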
def _raise_on_incompatible(left, right):
    """
    Helper function to render a consistent error message when raising
    IncompatibleFrequency.

    Parameters
    ----------
    left : PeriodArray
    right : DateOffset, Period, ndarray, or timedelta-like

    Raises
    ------
    IncompatibleFrequency
    """
    # GH#24283 error message format depends on whether right is scalar
    if isinstance(right, np.ndarray):
        other_freq = None
    elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)):
        other_freq = right.freqstr
    else:
        other_freq = _delta_to_tick(Timedelta(right)).freqstr

    msg = DIFFERENT_FREQ.format(cls=type(left).__name__,
                                own_freq=left.freqstr,
                                other_freq=other_freq)
    raise IncompatibleFrequency(msg)
def _period_array_cmp(cls, op):
    """
    Wrap comparison operations to convert Period-like to PeriodDtype
    """
    opname = '__{name}__'.format(name=op.__name__)
    nat_result = opname == '__ne__'

    def wrapper(self, other):
        op = getattr(self.asi8, opname)

        if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
            return NotImplemented

        if is_list_like(other) and len(other) != len(self):
            raise ValueError("Lengths must match")

        if isinstance(other, Period):
            self._check_compatible_with(other)

            result = op(other.ordinal)
        elif isinstance(other, cls):
            self._check_compatible_with(other)

            result = op(other.asi8)

            mask = self._isnan | other._isnan
            if mask.any():
                result[mask] = nat_result

            return result
        elif other is NaT:
            result = np.empty(len(self.asi8), dtype=bool)
            result.fill(nat_result)
        else:
            other = Period(other, freq=self.freq)
            result = op(other.ordinal)

        if self._hasnans:
            result[self._isnan] = nat_result

        return result

    return compat.set_function_name(wrapper, opname, cls)
def period_array(
        data: Sequence[Optional[Period]],
        freq: Optional[Tick] = None,
        copy: bool = False,
) -> PeriodArray:
    """
    Construct a new PeriodArray from a sequence of Period scalars.

    Parameters
    ----------
    data : Sequence of Period objects
        A sequence of Period objects. These are required to all have
        the same ``freq``. Missing values can be indicated by ``None``
        or ``pandas.NaT``.
    freq : str, Tick, or Offset
        The frequency of every element of the array. This can be specified
        to avoid inferring the `freq` from `data`.
    copy : bool, default False
        Whether to ensure a copy of the data is made.

    Returns
    -------
    PeriodArray

    See Also
    --------
    PeriodArray
    pandas.PeriodIndex

    Examples
    --------
    >>> period_array([pd.Period('2017', freq='A'),
    ...               pd.Period('2018', freq='A')])
    <PeriodArray>
    ['2017', '2018']
    Length: 2, dtype: period[A-DEC]

    >>> period_array([pd.Period('2017', freq='A'),
    ...               pd.Period('2018', freq='A'),
    ...               pd.NaT])
    <PeriodArray>
    ['2017', '2018', 'NaT']
    Length: 3, dtype: period[A-DEC]

    Integers that look like years are handled

    >>> period_array([2000, 2001, 2002], freq='D')
    <PeriodArray>
    ['2000-01-01', '2001-01-01', '2002-01-01']
    Length: 3, dtype: period[D]

    Datetime-like strings may also be passed

    >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'],
    ...              freq='Q')
    <PeriodArray>
    ['2000Q1', '2000Q2', '2000Q3', '2000Q4']
    Length: 4, dtype: period[Q-DEC]
    """
    if is_datetime64_dtype(data):
        return PeriodArray._from_datetime64(data, freq)
    if isinstance(data, (ABCPeriodIndex, ABCSeries, PeriodArray)):
        return PeriodArray(data, freq)

    # other iterable of some kind
    if not isinstance(data, (np.ndarray, list, tuple)):
        data = list(data)

    data = np.asarray(data)

    if freq:
        dtype = PeriodDtype(freq)
    else:
        dtype = None

    if is_float_dtype(data) and len(data) > 0:
        raise TypeError("PeriodIndex does not allow "
                        "floating point in construction")

    data = ensure_object(data)

    return PeriodArray._from_sequence(data, dtype=dtype)
def validate_dtype_freq(dtype, freq):
    """
    If both a dtype and a freq are available, ensure they match.  If only
    dtype is available, extract the implied freq.

    Parameters
    ----------
    dtype : dtype
    freq : DateOffset or None

    Returns
    -------
    freq : DateOffset

    Raises
    ------
    ValueError : non-period dtype
    IncompatibleFrequency : mismatch between dtype and freq
    """
    if freq is not None:
        freq = frequencies.to_offset(freq)

    if dtype is not None:
        dtype = pandas_dtype(dtype)
        if not is_period_dtype(dtype):
            raise ValueError('dtype must be PeriodDtype')
        if freq is None:
            freq = dtype.freq
        elif freq != dtype.freq:
            raise IncompatibleFrequency('specified freq and dtype '
                                        'are different')
    return freq
def dt64arr_to_periodarr(data, freq, tz=None):
    """
    Convert a datetime-like array of values to Period ordinals.

    Parameters
    ----------
    data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]]
    freq : Optional[Union[str, Tick]]
        Must match the `freq` on the `data` if `data` is a DatetimeIndex
        or Series.
    tz : Optional[tzinfo]

    Returns
    -------
    ordinals : ndarray[int]
    freq : Tick
        The frequency extracted from the Series or DatetimeIndex if that's
        used.
    """
    if data.dtype != np.dtype('M8[ns]'):
        raise ValueError('Wrong dtype: {dtype}'.format(dtype=data.dtype))

    if freq is None:
        if isinstance(data, ABCIndexClass):
            data, freq = data._values, data.freq
        elif isinstance(data, ABCSeries):
            data, freq = data._values, data.dt.freq

    freq = Period._maybe_convert_freq(freq)

    if isinstance(data, (ABCIndexClass, ABCSeries)):
        data = data._values

    base, mult = libfrequencies.get_freq_code(freq)
    return libperiod.dt64arr_to_periodarr(data.view('i8'), base, tz), freq
def _from_datetime64(cls, data, freq, tz=None):
    """
    Construct a PeriodArray from a datetime64 array

    Parameters
    ----------
    data : ndarray[datetime64[ns], datetime64[ns, tz]]
    freq : str or Tick
    tz : tzinfo, optional

    Returns
    -------
    PeriodArray[freq]
    """
    data, freq = dt64arr_to_periodarr(data, freq, tz)
    return cls(data, freq=freq)
def to_timestamp(self, freq=None, how='start'):
    """
    Cast to DatetimeArray/Index.

    Parameters
    ----------
    freq : string or DateOffset, optional
        Target frequency. The default is 'D' for week or longer,
        'S' otherwise
    how : {'s', 'e', 'start', 'end'}

    Returns
    -------
    DatetimeArray/Index
    """
    from pandas.core.arrays import DatetimeArray

    how = libperiod._validate_end_alias(how)

    end = how == 'E'
    if end:
        if freq == 'B':
            # roll forward to ensure we land on B date
            adjust = Timedelta(1, 'D') - Timedelta(1, 'ns')
            return self.to_timestamp(how='start') + adjust
        else:
            adjust = Timedelta(1, 'ns')
            return (self + self.freq).to_timestamp(how='start') - adjust

    if freq is None:
        base, mult = libfrequencies.get_freq_code(self.freq)
        freq = libfrequencies.get_to_timestamp_base(base)
    else:
        freq = Period._maybe_convert_freq(freq)

    base, mult = libfrequencies.get_freq_code(freq)
    new_data = self.asfreq(freq, how=how)

    new_data = libperiod.periodarr_to_dt64arr(new_data.asi8, base)
    return DatetimeArray._from_sequence(new_data, freq='infer')
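A short public-API illustration of the start/end alignment implemented above:

import pandas as pd

pidx = pd.period_range('2020-01', periods=2, freq='M')
print(pidx.to_timestamp(how='start'))  # 2020-01-01, 2020-02-01
print(pidx.to_timestamp(how='end'))    # last nanosecond of each month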
def _time_shift(self, periods, freq=None):
    """
    Shift each value by `periods`.

    Note this is different from ExtensionArray.shift, which
    shifts the *position* of each element, padding the end with
    missing values.

    Parameters
    ----------
    periods : int
        Number of periods to shift by.
    freq : pandas.DateOffset, pandas.Timedelta, or string
        Frequency increment to shift by.
    """
    if freq is not None:
        raise TypeError("`freq` argument is not supported for "
                        "{cls}._time_shift"
                        .format(cls=type(self).__name__))
    values = self.asi8 + periods * self.freq.n
    if self._hasnans:
        values[self._isnan] = iNaT
    return type(self)(values, freq=self.freq)
def asfreq(self, freq=None, how='E'):
    """
    Convert the Period Array/Index to the specified frequency `freq`.

    Parameters
    ----------
    freq : str
        a frequency
    how : str {'E', 'S'}
        'E', 'END', or 'FINISH' for end,
        'S', 'START', or 'BEGIN' for start.
        Whether the elements should be aligned to the end
        or start within a period. January 31st ('END') vs.
        January 1st ('START') for example.

    Returns
    -------
    new : Period Array/Index with the new frequency

    Examples
    --------
    >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A')
    >>> pidx
    PeriodIndex(['2010', '2011', '2012', '2013', '2014', '2015'],
    dtype='period[A-DEC]', freq='A-DEC')

    >>> pidx.asfreq('M')
    PeriodIndex(['2010-12', '2011-12', '2012-12', '2013-12', '2014-12',
    '2015-12'], dtype='period[M]', freq='M')

    >>> pidx.asfreq('M', how='S')
    PeriodIndex(['2010-01', '2011-01', '2012-01', '2013-01', '2014-01',
    '2015-01'], dtype='period[M]', freq='M')
    """
    how = libperiod._validate_end_alias(how)

    freq = Period._maybe_convert_freq(freq)

    base1, mult1 = libfrequencies.get_freq_code(self.freq)
    base2, mult2 = libfrequencies.get_freq_code(freq)

    asi8 = self.asi8
    # mult1 can't be negative or 0
    end = how == 'E'
    if end:
        ordinal = asi8 + mult1 - 1
    else:
        ordinal = asi8

    new_data = period_asfreq_arr(ordinal, base1, base2, end)

    if self._hasnans:
        new_data[self._isnan] = iNaT

    return type(self)(new_data, freq=freq)
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
    """
    actually format my specific types
    """
    values = self.astype(object)

    if date_format:
        formatter = lambda dt: dt.strftime(date_format)
    else:
        formatter = lambda dt: '%s' % dt

    if self._hasnans:
        mask = self._isnan
        values[mask] = na_rep
        imask = ~mask
        values[imask] = np.array([formatter(dt) for dt
                                  in values[imask]])
    else:
        values = np.array([formatter(dt) for dt in values])

    return values
def _add_timedeltalike_scalar(self, other):
    """
    Parameters
    ----------
    other : timedelta, Tick, np.timedelta64

    Returns
    -------
    result : ndarray[int64]
    """
    assert isinstance(self.freq, Tick)  # checked by calling function
    assert isinstance(other, (timedelta, np.timedelta64, Tick))

    if notna(other):
        # special handling for np.timedelta64("NaT"), avoid calling
        #  _check_timedeltalike_freq_compat as that would raise TypeError
        other = self._check_timedeltalike_freq_compat(other)

    # Note: when calling parent class's _add_timedeltalike_scalar,
    #  it will call delta_to_nanoseconds(delta).  Because delta here
    #  is an integer, delta_to_nanoseconds will return it unchanged.
    ordinals = super()._add_timedeltalike_scalar(other)
    return ordinals
def _add_delta_tdi(self, other):
    """
    Parameters
    ----------
    other : TimedeltaArray or ndarray[timedelta64]

    Returns
    -------
    result : ndarray[int64]
    """
    assert isinstance(self.freq, Tick)  # checked by calling function

    delta = self._check_timedeltalike_freq_compat(other)
    return self._addsub_int_array(delta, operator.add).asi8
def _add_delta(self, other):
    """
    Add a timedelta-like, Tick, or TimedeltaIndex-like object
    to self, yielding a new PeriodArray

    Parameters
    ----------
    other : {timedelta, np.timedelta64, Tick,
             TimedeltaIndex, ndarray[timedelta64]}

    Returns
    -------
    result : PeriodArray
    """
    if not isinstance(self.freq, Tick):
        # We cannot add timedelta-like to non-tick PeriodArray
        _raise_on_incompatible(self, other)

    new_ordinals = super()._add_delta(other)
    return type(self)(new_ordinals, freq=self.freq)
def _check_timedeltalike_freq_compat(self, other):
    """
    Arithmetic operations with timedelta-like scalars or array `other`
    are only valid if `other` is an integer multiple of `self.freq`.
    If the operation is valid, find that integer multiple.  Otherwise,
    raise because the operation is invalid.

    Parameters
    ----------
    other : timedelta, np.timedelta64, Tick,
            ndarray[timedelta64], TimedeltaArray, TimedeltaIndex

    Returns
    -------
    multiple : int or ndarray[int64]

    Raises
    ------
    IncompatibleFrequency
    """
    assert isinstance(self.freq, Tick)  # checked by calling function
    own_offset = frequencies.to_offset(self.freq.rule_code)
    base_nanos = delta_to_nanoseconds(own_offset)

    if isinstance(other, (timedelta, np.timedelta64, Tick)):
        nanos = delta_to_nanoseconds(other)

    elif isinstance(other, np.ndarray):
        # numpy timedelta64 array; all entries must be compatible
        assert other.dtype.kind == 'm'
        if other.dtype != _TD_DTYPE:
            # i.e. non-nano unit
            # TODO: disallow unit-less timedelta64
            other = other.astype(_TD_DTYPE)
        nanos = other.view('i8')
    else:
        # TimedeltaArray/Index
        nanos = other.asi8

    if np.all(nanos % base_nanos == 0):
        # nanos being added is an integer multiple of the
        #  base-frequency to self.freq
        delta = nanos // base_nanos
        # delta is the integer (or integer-array) number of periods
        #  which will be added to self.
        return delta

    _raise_on_incompatible(self, other)
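From the public side, adding a Timedelta to a tick-frequency PeriodIndex works only when it is a whole multiple of the frequency, as the check above enforces:

import pandas as pd

pidx = pd.period_range('2020-01-01', periods=3, freq='D')
print(pidx + pd.Timedelta('2D'))  # OK: 2 days is an integer multiple of 'D'
try:
    pidx + pd.Timedelta('12H')
except Exception as err:          # IncompatibleFrequency in this era
    print(type(err).__name__, err)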
def _isna_old(obj):
    """
    Detect missing values. Treat None, NaN, INF, -INF as null.

    Parameters
    ----------
    obj : ndarray or object value

    Returns
    -------
    boolean ndarray or boolean
    """
    if is_scalar(obj):
        return libmissing.checknull_old(obj)
    # hack (for now) because MI registers as ndarray
    elif isinstance(obj, ABCMultiIndex):
        raise NotImplementedError("isna is not defined for MultiIndex")
    elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)):
        return _isna_ndarraylike_old(obj)
    elif isinstance(obj, ABCGeneric):
        return obj._constructor(obj._data.isna(func=_isna_old))
    elif isinstance(obj, list):
        return _isna_ndarraylike_old(np.asarray(obj, dtype=object))
    elif hasattr(obj, '__array__'):
        return _isna_ndarraylike_old(np.asarray(obj))
    else:
        return obj is None
def _use_inf_as_na(key):
    """
    Option change callback for na/inf behaviour.

    Choose which replacement for numpy.isnan / -numpy.isfinite is used.

    Parameters
    ----------
    key : str
        The option key; the boolean flag is read from the option registry.
        True means treat None, NaN, INF, -INF as null (old way),
        False means None and NaN are null, but INF, -INF are not null
        (new way).

    Notes
    -----
    This approach to setting global module values is discussed and
    approved here:

    * http://stackoverflow.com/questions/4859217/
      programmatically-creating-variables-in-python/4859312#4859312
    """
    from pandas._config import get_option
    flag = get_option(key)
    if flag:
        globals()['_isna'] = _isna_old
    else:
        globals()['_isna'] = _isna_new
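The callback is wired to the public `use_inf_as_na` option; toggling it changes what `isna` reports for infinities:

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.inf, np.nan])
print(s.isna().tolist())                    # [False, False, True]
pd.set_option('mode.use_inf_as_na', True)
print(s.isna().tolist())                    # [False, True, True]
pd.set_option('mode.use_inf_as_na', False)  # restore the default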
def _isna_compat(arr, fill_value=np.nan):
    """
    Parameters
    ----------
    arr: a numpy array
    fill_value: fill value, default to np.nan

    Returns
    -------
    True if we can fill using this fill_value
    """
    dtype = arr.dtype
    if isna(fill_value):
        return not (is_bool_dtype(dtype) or
                    is_integer_dtype(dtype))
    return True
def array_equivalent(left, right, strict_nan=False):
    """
    True if two arrays, left and right, have equal non-NaN elements, and NaNs
    in corresponding locations.  False otherwise. It is assumed that left and
    right are NumPy arrays of the same dtype. The behavior of this function
    (particularly with respect to NaNs) is not defined if the dtypes are
    different.

    Parameters
    ----------
    left, right : ndarrays
    strict_nan : bool, default False
        If True, consider NaN and None to be different.

    Returns
    -------
    b : bool
        Returns True if the arrays are equivalent.

    Examples
    --------
    >>> array_equivalent(
    ...     np.array([1, 2, np.nan]),
    ...     np.array([1, 2, np.nan]))
    True
    >>> array_equivalent(
    ...     np.array([1, np.nan, 2]),
    ...     np.array([1, 2, np.nan]))
    False
    """
    left, right = np.asarray(left), np.asarray(right)

    # shape compat
    if left.shape != right.shape:
        return False

    # Object arrays can contain None, NaN and NaT.
    # string dtypes must come to this path for NumPy 1.7.1 compat
    if is_string_dtype(left) or is_string_dtype(right):

        if not strict_nan:
            # isna considers NaN and None to be equivalent.
            return lib.array_equivalent_object(
                ensure_object(left.ravel()), ensure_object(right.ravel()))

        for left_value, right_value in zip(left, right):
            if left_value is NaT and right_value is not NaT:
                return False

            elif isinstance(left_value, float) and np.isnan(left_value):
                if (not isinstance(right_value, float) or
                        not np.isnan(right_value)):
                    return False
            else:
                if left_value != right_value:
                    return False
        return True

    # NaNs can occur in float and complex arrays.
    if is_float_dtype(left) or is_complex_dtype(left):

        # empty
        if not (np.prod(left.shape) and np.prod(right.shape)):
            return True
        return ((left == right) | (isna(left) & isna(right))).all()

    # numpy will not allow this type of datetimelike vs integer comparison
    elif is_datetimelike_v_numeric(left, right):
        return False

    # M8/m8
    elif needs_i8_conversion(left) and needs_i8_conversion(right):
        if not is_dtype_equal(left.dtype, right.dtype):
            return False

        left = left.view('i8')
        right = right.view('i8')

    # if we have structured dtypes, compare first
    if (left.dtype.type is np.void or
            right.dtype.type is np.void):
        if left.dtype != right.dtype:
            return False

    return np.array_equal(left, right)
def _infer_fill_value(val):
    """
    infer the fill value for the nan/NaT from the provided
    scalar/ndarray/list-like if we are a NaT, return the correct dtyped
    element to provide proper block construction
    """
    if not is_list_like(val):
        val = [val]
    val = np.array(val, copy=False)
    if is_datetimelike(val):
        return np.array('NaT', dtype=val.dtype)
    elif is_object_dtype(val.dtype):
        dtype = lib.infer_dtype(ensure_object(val), skipna=False)
        if dtype in ['datetime', 'datetime64']:
            return np.array('NaT', dtype=_NS_DTYPE)
        elif dtype in ['timedelta', 'timedelta64']:
            return np.array('NaT', dtype=_TD_DTYPE)
    return np.nan
def _maybe_fill(arr, fill_value=np.nan):
    """
    if we have a compatible fill_value and arr dtype, then fill
    """
    if _isna_compat(arr, fill_value):
        arr.fill(fill_value)
    return arr
Return a dtype compat na value Parameters ---------- dtype : string / dtype compat : boolean, default True Returns ------- np.dtype or a pandas dtype Examples -------- >>> na_value_for_dtype(np.dtype('int64')) 0 >>> na_value_for_dtype(np.dtype('int64'), compat=False) nan >>> na_value_for_dtype(np.dtype('float64')) nan >>> na_value_for_dtype(np.dtype('bool')) False >>> na_value_for_dtype(np.dtype('datetime64[ns]')) NaT
def na_value_for_dtype(dtype, compat=True): """ Return a dtype compat na value Parameters ---------- dtype : string / dtype compat : boolean, default True Returns ------- na_value : scalar The NA value for the given dtype. Examples -------- >>> na_value_for_dtype(np.dtype('int64')) 0 >>> na_value_for_dtype(np.dtype('int64'), compat=False) nan >>> na_value_for_dtype(np.dtype('float64')) nan >>> na_value_for_dtype(np.dtype('bool')) False >>> na_value_for_dtype(np.dtype('datetime64[ns]')) NaT """ dtype = pandas_dtype(dtype) if is_extension_array_dtype(dtype): return dtype.na_value if (is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype) or is_timedelta64_dtype(dtype) or is_period_dtype(dtype)): return NaT elif is_float_dtype(dtype): return np.nan elif is_integer_dtype(dtype): if compat: return 0 return np.nan elif is_bool_dtype(dtype): return False return np.nan
Return array-like containing only true/non-NaN values, possibly empty.
def remove_na_arraylike(arr): """ Return array-like containing only true/non-NaN values, possibly empty. """ if is_extension_array_dtype(arr): return arr[notna(arr)] else: return arr[notna(lib.values_from_object(arr))]
Helper function to convert DataFrame and Series to matplotlib.table Parameters ---------- ax : Matplotlib axes object data : DataFrame or Series data for table contents kwargs : keywords, optional keyword arguments which are passed to matplotlib.table.table. If `rowLabels` or `colLabels` is not specified, the data index or column names will be used. Returns ------- matplotlib table object
def table(ax, data, rowLabels=None, colLabels=None, **kwargs): """ Helper function to convert DataFrame and Series to matplotlib.table Parameters ---------- ax : Matplotlib axes object data : DataFrame or Series data for table contents kwargs : keywords, optional keyword arguments which are passed to matplotlib.table.table. If `rowLabels` or `colLabels` is not specified, the data index or column names will be used. Returns ------- matplotlib table object """ if isinstance(data, ABCSeries): data = data.to_frame() elif isinstance(data, ABCDataFrame): pass else: raise ValueError('Input data must be DataFrame or Series') if rowLabels is None: rowLabels = data.index if colLabels is None: colLabels = data.columns cellText = data.values import matplotlib.table table = matplotlib.table.table(ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs) return table
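A short usage sketch (this helper is exposed publicly as pandas.plotting.table):

import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import table

df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
fig, ax = plt.subplots()
ax.axis('off')               # hide the axes so only the table shows
table(ax, df, loc='center')  # extra kwargs flow through to matplotlib.table.table
plt.savefig('table.png')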
Create a figure with a set of subplots already made. This utility wrapper makes it convenient to create common layouts of subplots, including the enclosing figure object, in a single call. Keyword arguments: naxes : int Number of required axes. Exceeded axes are set invisible. Default is nrows * ncols. sharex : bool If True, the X axis will be shared amongst all subplots. sharey : bool If True, the Y axis will be shared amongst all subplots. squeeze : bool If True, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects. - NxM subplots with N>1 and M>1 are returned as a 2-d array. If False, no squeezing is done: the returned axis object is always a 2-d array containing Axis instances, even if it ends up being 1x1. subplot_kw : dict Dict with keywords passed to the add_subplot() call used to create each subplot. ax : Matplotlib axis object, optional layout : tuple Number of rows and columns of the subplot grid. If not specified, calculated from naxes and layout_type layout_type : {'box', 'horizontal', 'vertical'}, default 'box' Specify how to lay out the subplot grid. fig_kw : Other keyword arguments to be passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns: fig, ax : tuple - fig is the Matplotlib Figure object - ax can be either a single axis object or an array of axis objects if more than one subplot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above. **Examples:** x = np.linspace(0, 2*np.pi, 400) y = np.sin(x**2) # Just a figure and one subplot f, ax = plt.subplots() ax.plot(x, y) ax.set_title('Simple plot') # Two subplots, unpack the output array immediately f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) ax1.plot(x, y) ax1.set_title('Sharing Y axis') ax2.scatter(x, y) # Four polar axes plt.subplots(2, 2, subplot_kw=dict(polar=True))
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True, subplot_kw=None, ax=None, layout=None, layout_type='box', **fig_kw): """Create a figure with a set of subplots already made. This utility wrapper makes it convenient to create common layouts of subplots, including the enclosing figure object, in a single call. Keyword arguments: naxes : int Number of required axes. Exceeded axes are set invisible. Default is nrows * ncols. sharex : bool If True, the X axis will be shared amongst all subplots. sharey : bool If True, the Y axis will be shared amongst all subplots. squeeze : bool If True, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects. - NxM subplots with N>1 and M>1 are returned as a 2-d array. If False, no squeezing is done: the returned axis object is always a 2-d array containing Axis instances, even if it ends up being 1x1. subplot_kw : dict Dict with keywords passed to the add_subplot() call used to create each subplot. ax : Matplotlib axis object, optional layout : tuple Number of rows and columns of the subplot grid. If not specified, calculated from naxes and layout_type layout_type : {'box', 'horizontal', 'vertical'}, default 'box' Specify how to lay out the subplot grid. fig_kw : Other keyword arguments to be passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns: fig, ax : tuple - fig is the Matplotlib Figure object - ax can be either a single axis object or an array of axis objects if more than one subplot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above. **Examples:** x = np.linspace(0, 2*np.pi, 400) y = np.sin(x**2) # Just a figure and one subplot f, ax = plt.subplots() ax.plot(x, y) ax.set_title('Simple plot') # Two subplots, unpack the output array immediately f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) ax1.plot(x, y) ax1.set_title('Sharing Y axis') ax2.scatter(x, y) # Four polar axes plt.subplots(2, 2, subplot_kw=dict(polar=True)) """ import matplotlib.pyplot as plt if subplot_kw is None: subplot_kw = {} if ax is None: fig = plt.figure(**fig_kw) else: if is_list_like(ax): ax = _flatten(ax) if layout is not None: warnings.warn("When passing multiple axes, layout keyword is " "ignored", UserWarning) if sharex or sharey: warnings.warn("When passing multiple axes, sharex and sharey " "are ignored. These settings must be specified " "when creating axes", UserWarning, stacklevel=4) if len(ax) == naxes: fig = ax[0].get_figure() return fig, ax else: raise ValueError("The number of passed axes must be {0}, the " "same as the output plot".format(naxes)) fig = ax.get_figure() # if ax is passed and a number of subplots is 1, return ax as it is if naxes == 1: if squeeze: return fig, ax else: return fig, _flatten(ax) else: warnings.warn("To output multiple subplots, the figure containing " "the passed axes is being cleared", UserWarning, stacklevel=4) fig.clear() nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) nplots = nrows * ncols # Create empty object array to hold all axes.
# It's easiest to make it 1-d so we can just append subplots upon creation, # and then reshape it at the end. axarr = np.empty(nplots, dtype=object) # Create first subplot separately, so we can share it if requested ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) if sharex: subplot_kw['sharex'] = ax0 if sharey: subplot_kw['sharey'] = ax0 axarr[0] = ax0 # Note off-by-one counting because add_subplot uses the MATLAB 1-based # convention. for i in range(1, nplots): kwds = subplot_kw.copy() # Set sharex and sharey to None for blank/dummy axes, these can # interfere with proper axis limits on the visible axes if # they share axes e.g. issue #7528 if i >= naxes: kwds['sharex'] = None kwds['sharey'] = None ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) axarr[i] = ax if naxes != nplots: for ax in axarr[naxes:]: ax.set_visible(False) _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) if squeeze: # Reshape the array to have the final desired dimension (nrow,ncol), # though discarding unneeded dimensions that equal 1. If we only have # one subplot, just return it instead of a 1-element array. if nplots == 1: axes = axarr[0] else: axes = axarr.reshape(nrows, ncols).squeeze() else: # returned axis array will be always 2-d, even if nrows=ncols=1 axes = axarr.reshape(nrows, ncols) return fig, axes
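_subplots itself is private; the same layout machinery is reachable through the public plotting API, e.g. one axes per column with an over-allocated grid (a usage sketch):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(20, 3), columns=list('abc'))
axes = df.plot(subplots=True, layout=(2, 2), sharex=True)
# axes is the 2x2 array assembled above; the unused fourth slot
# is the axis that _subplots set invisible.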
Render tempita templates before calling cythonize
def maybe_cythonize(extensions, *args, **kwargs): """ Render tempita templates before calling cythonize """ if len(sys.argv) > 1 and 'clean' in sys.argv: # Avoid running cythonize on `python setup.py clean` # See https://github.com/cython/cython/issues/1495 return extensions if not cython: # Avoid trying to look up numpy when installing from sdist # https://github.com/pandas-dev/pandas/issues/25193 # TODO: See if this can be removed after pyproject.toml added. return extensions numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') # TODO: Is this really necessary here? for ext in extensions: if (hasattr(ext, 'include_dirs') and numpy_incl not in ext.include_dirs): ext.include_dirs.append(numpy_incl) build_ext.render_templates(_pxifiles) return cythonize(extensions, *args, **kwargs)
Fast transform path for aggregations
def _transform_fast(self, result, obj, func_nm): """ Fast transform path for aggregations """ # if there were groups with no observations (Categorical only?) # try casting data to original dtype cast = self._transform_should_cast(func_nm) # for each col, reshape to the size of the original frame # by a take operation ids, _, ngroup = self.grouper.group_info output = [] for i, _ in enumerate(result.columns): res = algorithms.take_1d(result.iloc[:, i].values, ids) if cast: res = self._try_cast(res, obj.iloc[:, i]) output.append(res) return DataFrame._from_arrays(output, columns=result.columns, index=obj.index)
Return a copy of a DataFrame excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function Function to apply to each subframe. Should return True or False. dropna : bool Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Returns ------- filtered : DataFrame Notes ----- Each subframe is endowed with the attribute 'name' in case you need to know which group you are working on. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> grouped.filter(lambda x: x['B'].mean() > 3.) A B C 1 bar 2 5.0 3 bar 4 1.0 5 bar 6 9.0
def filter(self, func, dropna=True, *args, **kwargs): # noqa """ Return a copy of a DataFrame excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function Function to apply to each subframe. Should return True or False. dropna : bool Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Returns ------- filtered : DataFrame Notes ----- Each subframe is endowed with the attribute 'name' in case you need to know which group you are working on. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> grouped.filter(lambda x: x['B'].mean() > 3.) A B C 1 bar 2 5.0 3 bar 4 1.0 5 bar 6 9.0 """ indices = [] obj = self._selected_obj gen = self.grouper.get_iterator(obj, axis=self.axis) for name, group in gen: object.__setattr__(group, 'name', name) res = func(group, *args, **kwargs) try: res = res.squeeze() except AttributeError: # allow e.g., scalars and frames to pass pass # interpret the result of the filter if is_bool(res) or (is_scalar(res) and isna(res)): if res and notna(res): indices.append(self._get_index(name)) else: # non scalars aren't allowed raise TypeError("filter function returned a %s, " "but expected a scalar bool" % type(res).__name__) return self._apply_filter(indices, dropna)
common agg/transform wrapping logic
def _wrap_output(self, output, index, names=None): """ common agg/transform wrapping logic """ output = output[self._selection_name] if names is not None: return DataFrame(output, index=index, columns=names) else: name = self._selection_name if name is None: name = self._selected_obj.name return Series(output, index=index, name=name)
fast version of transform, only applicable to builtin/cythonizable functions
def _transform_fast(self, func, func_nm): """ fast version of transform, only applicable to builtin/cythonizable functions """ if isinstance(func, str): func = getattr(self, func) ids, _, ngroup = self.grouper.group_info cast = self._transform_should_cast(func_nm) out = algorithms.take_1d(func()._values, ids) if cast: out = self._try_cast(out, self.obj) return Series(out, index=self.obj.index, name=self.obj.name)
Return a copy of a Series excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function To apply to each group. Should return True or False. dropna : bool Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) 1 2 3 4 5 6 Name: B, dtype: int64 Returns ------- filtered : Series
def filter(self, func, dropna=True, *args, **kwargs): # noqa """ Return a copy of a Series excluding elements from groups that do not satisfy the boolean criterion specified by func. Parameters ---------- func : function To apply to each group. Should return True or False. dropna : bool Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. Examples -------- >>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar', ... 'foo', 'bar'], ... 'B' : [1, 2, 3, 4, 5, 6], ... 'C' : [2.0, 5., 8., 1., 2., 9.]}) >>> grouped = df.groupby('A') >>> df.groupby('A').B.filter(lambda x: x.mean() > 3.) 1 2 3 4 5 6 Name: B, dtype: int64 Returns ------- filtered : Series """ if isinstance(func, str): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) # Interpret np.nan as False. def true_and_notna(x, *args, **kwargs): b = wrapper(x, *args, **kwargs) return b and notna(b) try: indices = [self._get_index(name) for name, group in self if true_and_notna(group)] except ValueError: raise TypeError("the filter must return a boolean result") except TypeError: raise TypeError("the filter must return a boolean result") filtered = self._apply_filter(indices, dropna) return filtered
Return number of unique elements in the group.
def nunique(self, dropna=True): """ Return number of unique elements in the group. """ ids, _, _ = self.grouper.group_info val = self.obj.get_values() try: sorter = np.lexsort((val, ids)) except TypeError: # catches object dtypes msg = 'val.dtype must be object, got {}'.format(val.dtype) assert val.dtype == object, msg val, _ = algorithms.factorize(val, sort=False) sorter = np.lexsort((val, ids)) _isna = lambda a: a == -1 else: _isna = isna ids, val = ids[sorter], val[sorter] # group boundaries are where group ids change # unique observations are where sorted values change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] inc = np.r_[1, val[1:] != val[:-1]] # 1st item of each group is a new unique observation mask = _isna(val) if dropna: inc[idx] = 1 inc[mask] = 0 else: inc[mask & np.r_[False, mask[:-1]]] = 0 inc[idx] = 1 out = np.add.reduceat(inc, idx).astype('int64', copy=False) if len(ids): # NaN/NaT group exists if the head of ids is -1, # so remove it from res and exclude its index from idx if ids[0] == -1: res = out[1:] idx = idx[np.flatnonzero(idx)] else: res = out else: res = out[1:] ri = self.grouper.result_index # we might have duplications among the bins if len(res) != len(ri): res, out = np.zeros(len(ri), dtype=out.dtype), res res[ids[idx]] = out return Series(res, index=ri, name=self._selection_name)
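The lexsort trick above, in isolation (a sketch, not the pandas entry point): group starts come from changes in the sorted ids, new unique observations from changes in the sorted values, and np.add.reduceat sums the flags per group.

import numpy as np

ids = np.array([0, 0, 1, 1, 1])   # group label per row
val = np.array([3, 3, 1, 2, 2])   # values to count distinct per group
sorter = np.lexsort((val, ids))
ids_s, val_s = ids[sorter], val[sorter]
idx = np.r_[0, 1 + np.nonzero(ids_s[1:] != ids_s[:-1])[0]]  # group boundaries
inc = np.r_[1, val_s[1:] != val_s[:-1]]                     # new-value flags
inc[idx] = 1                       # first item of each group is always new
np.add.reduceat(inc, idx)          # -> array([1, 2]): nunique per group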
Compute count of group, excluding missing values
def count(self): """ Compute count of group, excluding missing values """ ids, _, ngroups = self.grouper.group_info val = self.obj.get_values() mask = (ids != -1) & ~isna(val) ids = ensure_platform_int(ids) minlength = ngroups or 0 out = np.bincount(ids[mask], minlength=minlength) return Series(out, index=self.grouper.result_index, name=self._selection_name, dtype='int64')
Calculate pct_change of each value to the previous entry in the group
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None): """Calculate pct_change of each value to the previous entry in the group""" # TODO: Remove this conditional when #23918 is fixed if freq: return self.apply(lambda x: x.pct_change(periods=periods, fill_method=fill_method, limit=limit, freq=freq)) filled = getattr(self, fill_method)(limit=limit) fill_grp = filled.groupby(self.grouper.labels) shifted = fill_grp.shift(periods=periods, freq=freq) return (filled / shifted) - 1
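Seen through the public API (a usage sketch): the change is computed against the previous row within each group, after the chosen fill method.

import pandas as pd

df = pd.DataFrame({'g': ['a', 'a', 'b', 'b'],
                   'v': [1.0, 2.0, 4.0, 6.0]})
df.groupby('g')['v'].pct_change()
# 0    NaN
# 1    1.0    (2/1 - 1)
# 2    NaN    (group boundary, no previous entry)
# 3    0.5    (6/4 - 1)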
sub-classes to define; return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on
def _gotitem(self, key, ndim, subset=None): """ sub-classes to define; return a sliced object. Parameters ---------- key : string / list of selections ndim : 1,2 requested ndim of result subset : object, default None subset to act on """ if ndim == 2: if subset is None: subset = self.obj return DataFrameGroupBy(subset, self.grouper, selection=key, grouper=self.grouper, exclusions=self.exclusions, as_index=self.as_index, observed=self.observed) elif ndim == 1: if subset is None: subset = self.obj[key] return SeriesGroupBy(subset, selection=key, grouper=self.grouper) raise AssertionError("invalid ndim for _gotitem")
If we have categorical groupers, then we want to make sure that we have a fully reindexed output to the levels. Some of these levels may not have participated in the groupings (e.g. they may have all been nan groups); this can re-expand the output space.
def _reindex_output(self, result): """ If we have categorical groupers, then we want to make sure that we have a fully reindexed output to the levels. Some of these levels may not have participated in the groupings (e.g. they may have all been nan groups); this can re-expand the output space. """ # we need to re-expand the output space to accommodate all values # whether observed or not in the cartesian product of our groupers groupings = self.grouper.groupings if groupings is None: return result elif len(groupings) == 1: return result # if we only care about the observed values # we are done elif self.observed: return result # reindexing only applies to a Categorical grouper elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex)) for ping in groupings): return result levels_list = [ping.group_index for ping in groupings] index, _ = MultiIndex.from_product( levels_list, names=self.grouper.names).sortlevel() if self.as_index: d = {self.obj._get_axis_name(self.axis): index, 'copy': False} return result.reindex(**d) # GH 13204 # Here, the categorical in-axis groupers, which need to be fully # expanded, are columns in `result`. An idea is to do: # result = result.set_index(self.grouper.names) # .reindex(index).reset_index() # but special care has to be taken because of possible not-in-axis # groupers. # So, we manually select and drop the in-axis grouper columns, # reindex `result`, and then reset the in-axis grouper columns. # Select in-axis groupers in_axis_grps = ((i, ping.name) for (i, ping) in enumerate(groupings) if ping.in_axis) g_nums, g_names = zip(*in_axis_grps) result = result.drop(labels=list(g_names), axis=1) # Set a temp index and reindex (possibly expanding) result = result.set_index(self.grouper.result_index ).reindex(index, copy=False) # Reset in-axis grouper columns # (using level numbers `g_nums` because level names may not be unique) result = result.reset_index(level=g_nums) return result.reset_index(drop=True)
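The effect is easiest to see from the public side: with a categorical grouper and observed=False, groups that never occur in the data still show up in the result (a usage sketch):

import pandas as pd

cat = pd.Categorical(['x', 'x'], categories=['x', 'y'])
df = pd.DataFrame({'c': cat, 'v': [1, 2]})
df.groupby('c', observed=False).size()
# c
# x    2
# y    0    <- re-expanded, never observed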
Overridden method to join grouped columns in output
def _fill(self, direction, limit=None): """Overridden method to join grouped columns in output""" res = super()._fill(direction, limit=limit) output = OrderedDict( (grp.name, grp.grouper) for grp in self.grouper.groupings) from pandas import concat return concat((self._wrap_transformed_output(output), res), axis=1)
Compute count of group, excluding missing values
def count(self): """ Compute count of group, excluding missing values """ from pandas.core.dtypes.missing import _isna_ndarraylike as _isna data, _ = self._get_data_to_aggregate() ids, _, ngroups = self.grouper.group_info mask = ids != -1 val = ((mask & ~_isna(np.atleast_2d(blk.get_values()))) for blk in data.blocks) loc = (blk.mgr_locs for blk in data.blocks) counter = partial( lib.count_level_2d, labels=ids, max_bin=ngroups, axis=1) blk = map(make_block, map(counter, val), loc) return self._wrap_agged_blocks(data.items, list(blk))
Return DataFrame with number of distinct observations per group for each column. .. versionadded:: 0.20.0 Parameters ---------- dropna : boolean, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Examples -------- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', ... 'ham', 'ham'], ... 'value1': [1, 5, 5, 2, 5, 5], ... 'value2': list('abbaxy')}) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby('id').nunique() id value1 value2 id egg 1 1 1 ham 1 1 2 spam 1 2 1 Check for rows with the same id but conflicting values: >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y
def nunique(self, dropna=True): """ Return DataFrame with number of distinct observations per group for each column. .. versionadded:: 0.20.0 Parameters ---------- dropna : boolean, default True Don't include NaN in the counts. Returns ------- nunique: DataFrame Examples -------- >>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam', ... 'ham', 'ham'], ... 'value1': [1, 5, 5, 2, 5, 5], ... 'value2': list('abbaxy')}) >>> df id value1 value2 0 spam 1 a 1 egg 5 b 2 egg 5 b 3 spam 2 a 4 ham 5 x 5 ham 5 y >>> df.groupby('id').nunique() id value1 value2 id egg 1 1 1 ham 1 1 2 spam 1 2 1 Check for rows with the same id but conflicting values: >>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any()) id value1 value2 0 spam 1 a 3 spam 2 a 4 ham 5 x 5 ham 5 y """ obj = self._selected_obj def groupby_series(obj, col=None): return SeriesGroupBy(obj, selection=col, grouper=self.grouper).nunique(dropna=dropna) if isinstance(obj, Series): results = groupby_series(obj) else: from pandas.core.reshape.concat import concat results = [groupby_series(obj[col], col) for col in obj.columns] results = concat(results, axis=1) results.columns.names = obj.columns.names if not self.as_index: results.index = ibase.default_index(len(results)) return results
Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. For Numpy-backed ExtensionArrays, the ndarray is extracted. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) [a, b, c] Categories (3, object): [a, b, c] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index a PandasArray is returned. >>> extract_array(pd.Series([1, 2, 3])) <PandasArray> [1, 2, 3] Length: 3, dtype: int64 To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3])
def extract_array(obj, extract_numpy=False): """ Extract the ndarray or ExtensionArray from a Series or Index. For all other types, `obj` is just returned as is. Parameters ---------- obj : object For Series / Index, the underlying ExtensionArray is unboxed. For Numpy-backed ExtensionArrays, the ndarray is extracted. extract_numpy : bool, default False Whether to extract the ndarray from a PandasArray Returns ------- arr : object Examples -------- >>> extract_array(pd.Series(['a', 'b', 'c'], dtype='category')) [a, b, c] Categories (3, object): [a, b, c] Other objects like lists, arrays, and DataFrames are just passed through. >>> extract_array([1, 2, 3]) [1, 2, 3] For an ndarray-backed Series / Index a PandasArray is returned. >>> extract_array(pd.Series([1, 2, 3])) <PandasArray> [1, 2, 3] Length: 3, dtype: int64 To extract all the way down to the ndarray, pass ``extract_numpy=True``. >>> extract_array(pd.Series([1, 2, 3]), extract_numpy=True) array([1, 2, 3]) """ if isinstance(obj, (ABCIndexClass, ABCSeries)): obj = obj.array if extract_numpy and isinstance(obj, ABCPandasArray): obj = obj.to_numpy() return obj
Flatten an arbitrarily nested sequence. Parameters ---------- l : sequence The non-string sequence to flatten Notes ----- This doesn't treat strings as sequences. Returns ------- flattened : generator
def flatten(l): """ Flatten an arbitrarily nested sequence. Parameters ---------- l : sequence The non-string sequence to flatten Notes ----- This doesn't treat strings as sequences. Returns ------- flattened : generator """ for el in l: if _iterable_not_string(el): for s in flatten(el): yield s else: yield el
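flatten is a generator and leaves strings intact (a usage sketch; in this codebase the helper lives in pandas.core.common):

from pandas.core.common import flatten

list(flatten([1, [2, (3, 4)], 'ab']))
# [1, 2, 3, 4, 'ab']   -- 'ab' survives as an atom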
Check whether `key` is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with ``_is_boolean`` set are considered boolean indexers. Returns ------- bool Raises ------ ValueError When the array is an object-dtype ndarray or ExtensionArray and contains missing values.
def is_bool_indexer(key: Any) -> bool: """ Check whether `key` is a valid boolean indexer. Parameters ---------- key : Any Only list-likes may be considered boolean indexers. All other types are not considered a boolean indexer. For array-like input, boolean ndarrays or ExtensionArrays with ``_is_boolean`` set are considered boolean indexers. Returns ------- bool Raises ------ ValueError When the array is an object-dtype ndarray or ExtensionArray and contains missing values. """ na_msg = 'cannot index with vector containing NA / NaN values' if (isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or (is_array_like(key) and is_extension_array_dtype(key.dtype))): if key.dtype == np.object_: key = np.asarray(values_from_object(key)) if not lib.is_bool_array(key): if isna(key).any(): raise ValueError(na_msg) return False return True elif is_bool_dtype(key.dtype): # an ndarray with bool-dtype by definition has no missing values. # So we only need to check for NAs in ExtensionArrays if is_extension_array_dtype(key.dtype): if np.any(key.isna()): raise ValueError(na_msg) return True elif isinstance(key, list): try: arr = np.asarray(key) return arr.dtype == np.bool_ and len(arr) == len(key) except TypeError: # pragma: no cover return False return False
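A few concrete cases (a sketch; the helper is internal, here assumed importable from pandas.core.common):

import numpy as np
import pandas as pd
from pandas.core.common import is_bool_indexer

is_bool_indexer(np.array([True, False]))   # True
is_bool_indexer([True, False])             # True: list coerces to a bool array
is_bool_indexer(np.array([1, 0]))          # False: ints, not bools
# is_bool_indexer(pd.Series([True, np.nan], dtype=object))  # -> ValueError:
#     object-dtype booleans with missing values are rejected outright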
To avoid numpy DeprecationWarnings, cast float to integer where valid. Parameters ---------- val : scalar Returns ------- outval : scalar
def cast_scalar_indexer(val): """ To avoid numpy DeprecationWarnings, cast float to integer where valid. Parameters ---------- val : scalar Returns ------- outval : scalar """ # assumes lib.is_scalar(val) if lib.is_float(val) and val == int(val): return int(val) return val
Transform label or iterable of labels to array, for use in Index. Parameters ---------- dtype : dtype If specified, use as dtype of the resulting array, otherwise infer. Returns ------- array
def index_labels_to_array(labels, dtype=None): """ Transform label or iterable of labels to array, for use in Index. Parameters ---------- dtype : dtype If specified, use as dtype of the resulting array, otherwise infer. Returns ------- array """ if isinstance(labels, (str, tuple)): labels = [labels] if not isinstance(labels, (list, np.ndarray)): try: labels = list(labels) except TypeError: # non-iterable labels = [labels] labels = asarray_tuplesafe(labels, dtype=dtype) return labels
We have a null slice.
def is_null_slice(obj): """ We have a null slice. """ return (isinstance(obj, slice) and obj.start is None and obj.stop is None and obj.step is None)
We have a full length slice.
def is_full_slice(obj, l): """ We have a full length slice. """ return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and obj.step is None)
Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs
def apply_if_callable(maybe_callable, obj, **kwargs): """ Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is. Parameters ---------- maybe_callable : possibly a callable obj : NDFrame **kwargs """ if callable(maybe_callable): return maybe_callable(obj, **kwargs) return maybe_callable
Helper function to standardize a supplied mapping. .. versionadded:: 0.21.0 Parameters ---------- into : instance or subclass of collections.abc.Mapping Must be a class, an initialized collections.defaultdict, or an instance of a collections.abc.Mapping subclass. Returns ------- mapping : a collections.abc.Mapping subclass or other constructor a callable object that can accept an iterator to create the desired Mapping. See Also -------- DataFrame.to_dict Series.to_dict
def standardize_mapping(into): """ Helper function to standardize a supplied mapping. .. versionadded:: 0.21.0 Parameters ---------- into : instance or subclass of collections.abc.Mapping Must be a class, an initialized collections.defaultdict, or an instance of a collections.abc.Mapping subclass. Returns ------- mapping : a collections.abc.Mapping subclass or other constructor a callable object that can accept an iterator to create the desired Mapping. See Also -------- DataFrame.to_dict Series.to_dict """ if not inspect.isclass(into): if isinstance(into, collections.defaultdict): return partial( collections.defaultdict, into.default_factory) into = type(into) if not issubclass(into, abc.Mapping): raise TypeError('unsupported type: {into}'.format(into=into)) elif into == collections.defaultdict: raise TypeError( 'to_dict() only accepts initialized defaultdicts') return into
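standardize_mapping is what backs the `into` argument of Series.to_dict / DataFrame.to_dict; for example (a usage sketch):

import collections
import pandas as pd

df = pd.DataFrame({'a': [1, 2]})
df.to_dict(into=collections.OrderedDict)        # a Mapping subclass is fine
df.to_dict(into=collections.defaultdict(list))  # an initialized defaultdict is fine
# df.to_dict(into=collections.defaultdict)      # TypeError: needs initialization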
Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState
def random_state(state=None): """ Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState """ if is_integer(state): return np.random.RandomState(state) elif isinstance(state, np.random.RandomState): return state elif state is None: return np.random else: raise ValueError("random_state must be an integer, a numpy " "RandomState, or None")
Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, interpret the first element of the tuple as a function and pass the obj to that function as a keyword argument whose key is the value of the second element of the tuple. Parameters ---------- func : callable or tuple of (callable, string) Function to apply to this object or, alternatively, a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of `callable`` that expects the object. args : iterable, optional positional arguments passed into ``func``. kwargs : dict, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``.
def _pipe(obj, func, *args, **kwargs): """ Apply a function ``func`` to object ``obj`` either by passing obj as the first argument to the function or, in the case that the func is a tuple, interpret the first element of the tuple as a function and pass the obj to that function as a keyword argument whose key is the value of the second element of the tuple. Parameters ---------- func : callable or tuple of (callable, string) Function to apply to this object or, alternatively, a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a string indicating the keyword of `callable`` that expects the object. args : iterable, optional positional arguments passed into ``func``. kwargs : dict, optional a dictionary of keyword arguments passed into ``func``. Returns ------- object : the return type of ``func``. """ if isinstance(func, tuple): func, target = func if target in kwargs: msg = '%s is both the pipe target and a keyword argument' % target raise ValueError(msg) kwargs[target] = obj return func(*args, **kwargs) else: return func(obj, *args, **kwargs)
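The same convention is exposed publicly as DataFrame.pipe / Series.pipe; the tuple form routes the object into a keyword of your choosing (a usage sketch):

import pandas as pd

def summarize(fmt, frame):
    return fmt.format(n=len(frame))

df = pd.DataFrame({'a': [1, 2, 3]})
df.pipe((summarize, 'frame'), 'rows={n}')   # -> 'rows=3'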
Returns a function that will map names/labels, depending on whether mapper is a dict, Series or just a function.
def _get_rename_function(mapper): """ Returns a function that will map names/labels, depending on whether mapper is a dict, Series or just a function. """ if isinstance(mapper, (abc.Mapping, ABCSeries)): def f(x): if x in mapper: return mapper[x] else: return x else: f = mapper return f
return the correct fill value for the dtype of the values
def _get_fill_value(dtype, fill_value=None, fill_value_typ=None): """ return the correct fill value for the dtype of the values """ if fill_value is not None: return fill_value if _na_ok_dtype(dtype): if fill_value_typ is None: return np.nan else: if fill_value_typ == '+inf': return np.inf else: return -np.inf else: if fill_value_typ is None: return tslibs.iNaT else: if fill_value_typ == '+inf': # need the max int here return _int64_max else: return tslibs.iNaT
utility to get the values view, mask, and dtype; if necessary, copy and mask using the specified fill_value. copy=True will force the copy
def _get_values(values, skipna, fill_value=None, fill_value_typ=None, isfinite=False, copy=True, mask=None): """ utility to get the values view, mask, and dtype; if necessary, copy and mask using the specified fill_value. copy=True will force the copy """ if is_datetime64tz_dtype(values): # com.values_from_object returns M8[ns] dtype instead of tz-aware, # so this case must be handled separately from the rest dtype = values.dtype values = getattr(values, "_values", values) else: values = com.values_from_object(values) dtype = values.dtype if mask is None: if isfinite: mask = _isfinite(values) else: mask = isna(values) if is_datetime_or_timedelta_dtype(values) or is_datetime64tz_dtype(values): # changing timedelta64/datetime64 to int64 needs to happen after # finding `mask` above values = getattr(values, "asi8", values) values = values.view(np.int64) dtype_ok = _na_ok_dtype(dtype) # get our fill value (in case we need to provide an alternative # dtype for it) fill_value = _get_fill_value(dtype, fill_value=fill_value, fill_value_typ=fill_value_typ) if skipna: if copy: values = values.copy() if dtype_ok: np.putmask(values, mask, fill_value) # promote if needed else: values, changed = maybe_upcast_putmask(values, mask, fill_value) elif copy: values = values.copy() # return a platform independent precision dtype dtype_max = dtype if is_integer_dtype(dtype) or is_bool_dtype(dtype): dtype_max = np.int64 elif is_float_dtype(dtype): dtype_max = np.float64 return values, mask, dtype, dtype_max, fill_value
wrap our results if needed
def _wrap_results(result, dtype, fill_value=None): """ wrap our results if needed """ if is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): if fill_value is None: # GH#24293 fill_value = iNaT if not isinstance(result, np.ndarray): tz = getattr(dtype, 'tz', None) assert not isna(fill_value), "Expected non-null fill_value" if result == fill_value: result = np.nan result = tslibs.Timestamp(result, tz=tz) else: result = result.view(dtype) elif is_timedelta64_dtype(dtype): if not isinstance(result, np.ndarray): if result == fill_value: result = np.nan # raise if we have a timedelta64[ns] which is too large if np.fabs(result) > _int64_max: raise ValueError("overflow in timedelta operation") result = tslibs.Timedelta(result, unit='ns') else: result = result.astype('i8').view(dtype) return result
Return the missing value for `values` Parameters ---------- values : ndarray axis : int or None axis for the reduction Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing.
def _na_for_min_count(values, axis): """Return the missing value for `values` Parameters ---------- values : ndarray axis : int or None axis for the reduction Returns ------- result : scalar or ndarray For 1-D values, returns a scalar of the correct missing type. For 2-D values, returns a 1-D array where each element is missing. """ # we either return np.nan or pd.NaT if is_numeric_dtype(values): values = values.astype('float64') fill_value = na_value_for_dtype(values.dtype) if values.ndim == 1: return fill_value else: result_shape = (values.shape[:axis] + values.shape[axis + 1:]) result = np.empty(result_shape, dtype=values.dtype) result.fill(fill_value) return result
Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s) False
def nanany(values, axis=None, skipna=True, mask=None): """ Check if any elements along an axis evaluate to True. Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2]) >>> nanops.nanany(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([np.nan]) >>> nanops.nanany(s) False """ values, mask, dtype, _, _ = _get_values(values, skipna, False, copy=skipna, mask=mask) return values.any(axis)
Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s) False
def nanall(values, axis=None, skipna=True, mask=None): """ Check if all elements along an axis evaluate to True. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : bool Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanall(s) True >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 0]) >>> nanops.nanall(s) False """ values, mask, dtype, _, _ = _get_values(values, skipna, True, copy=skipna, mask=mask) return values.all(axis)
Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0
def nansum(values, axis=None, skipna=True, min_count=0, mask=None): """ Sum the elements along an axis ignoring NaNs Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nansum(s) 3.0 """ values, mask, dtype, dtype_max, _ = _get_values(values, skipna, 0, mask=mask) dtype_sum = dtype_max if is_float_dtype(dtype): dtype_sum = dtype elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count) return _wrap_results(the_sum, dtype)
Compute the mean of the elements along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5
def nanmean(values, axis=None, skipna=True, mask=None): """ Compute the mean of the elements along an axis ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s) 1.5 """ values, mask, dtype, dtype_max, _ = _get_values( values, skipna, 0, mask=mask) dtype_sum = dtype_max dtype_count = np.float64 if (is_integer_dtype(dtype) or is_timedelta64_dtype(dtype) or is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype)): dtype_sum = np.float64 elif is_float_dtype(dtype): dtype_sum = dtype dtype_count = dtype count = _get_counts(mask, axis, dtype=dtype_count) the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum)) if axis is not None and getattr(the_sum, 'ndim', False): with np.errstate(all="ignore"): # suppress division by zero warnings the_mean = the_sum / count ct_mask = count == 0 if ct_mask.any(): the_mean[ct_mask] = np.nan else: the_mean = the_sum / count if count > 0 else np.nan return _wrap_results(the_mean, dtype)
Compute the median of the elements along an axis ignoring NaNs. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 2]) >>> nanops.nanmedian(s) 2.0
def nanmedian(values, axis=None, skipna=True, mask=None): """ Compute the median of the elements along an axis ignoring NaNs. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 2]) >>> nanops.nanmedian(s) 2.0 """ def get_median(x): mask = notna(x) if not skipna and not mask.all(): return np.nan return np.nanmedian(x[mask]) values, mask, dtype, dtype_max, _ = _get_values(values, skipna, mask=mask) if not is_float_dtype(values): values = values.astype('f8') values[mask] = np.nan if axis is None: values = values.ravel() notempty = values.size # an array from a frame if values.ndim > 1: # there's a non-empty array to apply over otherwise numpy raises if notempty: if not skipna: return _wrap_results( np.apply_along_axis(get_median, axis, values), dtype) # fastpath for the skipna case return _wrap_results(np.nanmedian(values, axis), dtype) # must return the correct shape, but median is not defined for the # empty set so return nans of shape "everything but the passed axis" # since "axis" is where the reduction would occur if we had a nonempty # array shp = np.array(values.shape) dims = np.arange(values.ndim) ret = np.empty(shp[dims != axis]) ret.fill(np.nan) return _wrap_results(ret, dtype) # otherwise return a scalar value return _wrap_results(get_median(values) if notempty else np.nan, dtype)
Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s) 1.0
def nanstd(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the standard deviation along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanstd(s) 1.0 """ result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)) return _wrap_results(result, values.dtype)
Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0
def nanvar(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s) 1.0 """ values = com.values_from_object(values) dtype = values.dtype if mask is None: mask = isna(values) if is_any_int_dtype(values): values = values.astype('f8') values[mask] = np.nan if is_float_dtype(values): count, d = _get_counts_nanvar(mask, axis, ddof, values.dtype) else: count, d = _get_counts_nanvar(mask, axis, ddof) if skipna: values = values.copy() np.putmask(values, mask, 0) # xref GH10242 # Compute variance via two-pass algorithm, which is stable against # cancellation errors and relatively accurate for small numbers of # observations. # # See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) np.putmask(sqr, mask, 0) result = sqr.sum(axis=axis, dtype=np.float64) / d # Return variance as np.float64 (the datatype used in the accumulator), # unless we were dealing with a float array, in which case use the same # precision as the original values array. if is_float_dtype(dtype): result = result.astype(dtype) return _wrap_results(result, values.dtype)
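The two-pass scheme in miniature (a sketch, not the pandas entry point): compute the mean over the unmasked values first, then sum squared deviations with the masked slots zeroed out.

import numpy as np

x = np.array([1.0, np.nan, 2.0, 3.0])
mask = np.isnan(x)
vals = np.where(mask, 0.0, x)
count = (~mask).sum()
avg = vals.sum() / count                       # pass 1: mean of [1, 2, 3] -> 2.0
sqr = np.where(mask, 0.0, (vals - avg) ** 2)
sqr.sum() / (count - 1)                        # pass 2: variance, ddof=1 -> 1.0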
Compute the standard error in the mean along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nansem(s) 0.5773502691896258
def nansem(values, axis=None, skipna=True, ddof=1, mask=None): """ Compute the standard error in the mean along given axis while ignoring NaNs Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nansem(s) 0.5773502691896258 """ # This checks if non-numeric-like data is passed with numeric_only=False # and raises a TypeError otherwise nanvar(values, axis, skipna, ddof=ddof, mask=mask) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count, _ = _get_counts_nanvar(mask, axis, ddof, values.dtype) var = nanvar(values, axis, skipna, ddof=ddof) return np.sqrt(var) / np.sqrt(count)
Return the index of the maximum value along an axis, ignoring NaNs. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int The index of the max value in the specified axis, or -1 in the NA case Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, 3, np.nan, 4]) >>> nanops.nanargmax(s) 4
def nanargmax(values, axis=None, skipna=True, mask=None): """ Return the index of the maximum value along an axis, ignoring NaNs. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int The index of the max value in the specified axis, or -1 in the NA case Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, 3, np.nan, 4]) >>> nanops.nanargmax(s) 4 """ values, mask, dtype, _, _ = _get_values( values, skipna, fill_value_typ='-inf', mask=mask) result = values.argmax(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result
Return the index of the minimum value along an axis, ignoring NaNs. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int The index of the min value in the specified axis, or -1 in the NA case Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, 3, np.nan, 4]) >>> nanops.nanargmin(s) 0
def nanargmin(values, axis=None, skipna=True, mask=None): """ Return the index of the minimum value along an axis, ignoring NaNs. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int The index of the min value in the specified axis, or -1 in the NA case Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, 3, np.nan, 4]) >>> nanops.nanargmin(s) 0 """ values, mask, dtype, _, _ = _get_values( values, skipna, fill_value_typ='+inf', mask=mask) result = values.argmin(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result
Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G1. The algorithm computes this coefficient directly from the second and third central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 2]) >>> nanops.nanskew(s) 1.7320508075688787
def nanskew(values, axis=None, skipna=True, mask=None): """ Compute the sample skewness. The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G1. The algorithm computes this coefficient directly from the second and third central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 2]) >>> nanops.nanskew(s) 1.7320508075688787 """ values = com.values_from_object(values) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count = _get_counts(mask, axis) else: count = _get_counts(mask, axis, dtype=values.dtype) if skipna: values = values.copy() np.putmask(values, mask, 0) mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna: np.putmask(adjusted, mask, 0) adjusted2 = adjusted ** 2 adjusted3 = adjusted2 * adjusted m2 = adjusted2.sum(axis, dtype=np.float64) m3 = adjusted3.sum(axis, dtype=np.float64) # floating point error # # #18044 in _libs/windows.pyx calc_skew follow this behavior # to fix the fperr to treat m2 <1e-14 as zero m2 = _zero_out_fperr(m2) m3 = _zero_out_fperr(m3) with np.errstate(invalid='ignore', divide='ignore'): result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5) dtype = values.dtype if is_float_dtype(dtype): result = result.astype(dtype) if isinstance(result, np.ndarray): result = np.where(m2 == 0, 0, result) result[count < 3] = np.nan return result else: result = 0 if m2 == 0 else result if count < 3: return np.nan return result
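The G1 coefficient written out for a tiny 1-D input (a sketch that reproduces the doctest value above; m2 and m3 are the sums of squared and cubed deviations, matching the m2/m3 accumulators in the code):

import numpy as np

x = np.array([1.0, 1.0, 2.0])        # the non-NaN values from the example
n = x.size
m2 = ((x - x.mean()) ** 2).sum()     # sum of squared deviations
m3 = ((x - x.mean()) ** 3).sum()     # sum of cubed deviations
(n * (n - 1) ** 0.5 / (n - 2)) * (m3 / m2 ** 1.5)
# 1.7320508075688787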
Compute the sample excess kurtosis The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G2, computed directly from the second and fourth central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 3, 2]) >>> nanops.nankurt(s) -1.2892561983471076
def nankurt(values, axis=None, skipna=True, mask=None): """ Compute the sample excess kurtosis The statistic computed here is the adjusted Fisher-Pearson standardized moment coefficient G2, computed directly from the second and fourth central moment. Parameters ---------- values : ndarray axis: int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : float64 Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1,np.nan, 1, 3, 2]) >>> nanops.nankurt(s) -1.2892561983471076 """ values = com.values_from_object(values) if mask is None: mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') count = _get_counts(mask, axis) else: count = _get_counts(mask, axis, dtype=values.dtype) if skipna: values = values.copy() np.putmask(values, mask, 0) mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna: np.putmask(adjusted, mask, 0) adjusted2 = adjusted ** 2 adjusted4 = adjusted2 ** 2 m2 = adjusted2.sum(axis, dtype=np.float64) m4 = adjusted4.sum(axis, dtype=np.float64) with np.errstate(invalid='ignore', divide='ignore'): adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) numer = count * (count + 1) * (count - 1) * m4 denom = (count - 2) * (count - 3) * m2 ** 2 # floating point error # # #18044 in _libs/windows.pyx calc_kurt follow this behavior # to fix the fperr to treat denom <1e-14 as zero numer = _zero_out_fperr(numer) denom = _zero_out_fperr(denom) if not isinstance(denom, np.ndarray): # if ``denom`` is a scalar, check these corner cases first before # doing division if count < 4: return np.nan if denom == 0: return 0 with np.errstate(invalid='ignore', divide='ignore'): result = numer / denom - adj dtype = values.dtype if is_float_dtype(dtype): result = result.astype(dtype) if isinstance(result, np.ndarray): result = np.where(denom == 0, 0, result) result[count < 4] = np.nan return result
Compute the product of the elements along an axis ignoring NaNs (NaNs are treated as 1). Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype The product of all elements on a given axis (NaNs are treated as 1). Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, 3, np.nan]) >>> nanops.nanprod(s) 6.0
def nanprod(values, axis=None, skipna=True, min_count=0, mask=None): """ Compute the product of the elements along an axis ignoring NaNs (NaNs are treated as 1). Parameters ---------- values : ndarray[dtype] axis: int, optional skipna : bool, default True min_count: int, default 0 mask : ndarray[bool], optional nan-mask if known Returns ------- result : dtype The product of all elements on a given axis (NaNs are treated as 1). Examples -------- >>> import pandas.core.nanops as nanops >>> s = pd.Series([1, 2, 3, np.nan]) >>> nanops.nanprod(s) 6.0 """ if mask is None: mask = isna(values) if skipna and not is_any_int_dtype(values): values = values.copy() values[mask] = 1 result = values.prod(axis) return _maybe_null_out(result, axis, mask, min_count=min_count)
a, b: ndarrays
def nancorr(a, b, method='pearson', min_periods=None): """ a, b: ndarrays """ if len(a) != len(b): raise AssertionError('Operands to nancorr must have same size') if min_periods is None: min_periods = 1 valid = notna(a) & notna(b) if not valid.all(): a = a[valid] b = b[valid] if len(a) < min_periods: return np.nan f = get_corr_func(method) return f(a, b)
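nancorr drops pairwise-missing entries before delegating to the correlation function chosen by `method`; the public analogue is Series.corr (a usage sketch):

import numpy as np
import pandas as pd

a = pd.Series([1.0, 2.0, np.nan, 4.0])
b = pd.Series([1.0, 2.0, 3.0, 5.0])
a.corr(b)                      # pearson over the 3 aligned non-NaN pairs
a.corr(b, method='spearman')   # rank-based alternative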