Please provide a description of the function:

    def head(self, n=5):
        self._reset_group_selection()
        mask = self._cumcount_array() < n
        return self._selected_obj[mask]

Description:

    Return first n rows of each group.

    Essentially equivalent to ``.apply(lambda x: x.head(n))``,
    except ignores as_index flag.
    %(see_also)s
    Examples
    --------

    >>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
    ...                   columns=['A', 'B'])
    >>> df.groupby('A', as_index=False).head(1)
       A  B
    0  1  2
    2  5  6
    >>> df.groupby('A').head(1)
       A  B
    0  1  2
    2  5  6
Please provide a description of the function:

    def tail(self, n=5):
        self._reset_group_selection()
        mask = self._cumcount_array(ascending=False) < n
        return self._selected_obj[mask]

Description:

    Return last n rows of each group.

    Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
    except ignores as_index flag.
    %(see_also)s
    Examples
    --------

    >>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
    ...                   columns=['A', 'B'])
    >>> df.groupby('A').tail(1)
       A  B
    1  a  2
    3  b  2
    >>> df.groupby('A').head(1)
       A  B
    0  a  1
    2  b  1
Please provide a description of the function:

    def next_monday(dt):
        if dt.weekday() == 5:
            return dt + timedelta(2)
        elif dt.weekday() == 6:
            return dt + timedelta(1)
        return dt

Description:

    If holiday falls on Saturday, use following Monday instead;
    if holiday falls on Sunday, use Monday instead.
Please provide a description of the function:

    def next_monday_or_tuesday(dt):
        dow = dt.weekday()
        if dow == 5 or dow == 6:
            return dt + timedelta(2)
        elif dow == 0:
            return dt + timedelta(1)
        return dt

Description:

    For second holiday of two adjacent ones!
    If holiday falls on Saturday, use following Monday instead;
    if holiday falls on Sunday or Monday, use following Tuesday instead
    (because Monday is already taken by adjacent holiday on the day before).
Please provide a description of the function:

    def previous_friday(dt):
        if dt.weekday() == 5:
            return dt - timedelta(1)
        elif dt.weekday() == 6:
            return dt - timedelta(2)
        return dt

Description:

    If holiday falls on Saturday or Sunday, use previous Friday instead.
Please provide a description of the function:

    def weekend_to_monday(dt):
        if dt.weekday() == 6:
            return dt + timedelta(1)
        elif dt.weekday() == 5:
            return dt + timedelta(2)
        return dt

Description:

    If holiday falls on Sunday or Saturday,
    use day thereafter (Monday) instead.
    Needed for holidays such as Christmas observance in Europe.
Please provide a description of the function:

    def nearest_workday(dt):
        if dt.weekday() == 5:
            return dt - timedelta(1)
        elif dt.weekday() == 6:
            return dt + timedelta(1)
        return dt

Description:

    If holiday falls on Saturday, use day before (Friday) instead;
    if holiday falls on Sunday, use day thereafter (Monday) instead.
Please provide a description of the function:

    def next_workday(dt):
        dt += timedelta(days=1)
        while dt.weekday() > 4:
            # Mon-Fri are 0-4
            dt += timedelta(days=1)
        return dt

Description:

    Returns next weekday used for observances.
Please provide a description of the function:

    def previous_workday(dt):
        dt -= timedelta(days=1)
        while dt.weekday() > 4:
            # Mon-Fri are 0-4
            dt -= timedelta(days=1)
        return dt

Description:

    Returns previous weekday used for observances.
Please provide a description of the function:

    def dates(self, start_date, end_date, return_name=False):
        start_date = Timestamp(start_date)
        end_date = Timestamp(end_date)

        filter_start_date = start_date
        filter_end_date = end_date

        if self.year is not None:
            dt = Timestamp(datetime(self.year, self.month, self.day))
            if return_name:
                return Series(self.name, index=[dt])
            else:
                return [dt]

        dates = self._reference_dates(start_date, end_date)
        holiday_dates = self._apply_rule(dates)
        if self.days_of_week is not None:
            holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
                                                  self.days_of_week)]

        if self.start_date is not None:
            filter_start_date = max(self.start_date.tz_localize(
                filter_start_date.tz), filter_start_date)
        if self.end_date is not None:
            filter_end_date = min(self.end_date.tz_localize(
                filter_end_date.tz), filter_end_date)
        holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
                                      (holiday_dates <= filter_end_date)]
        if return_name:
            return Series(self.name, index=holiday_dates)
        return holiday_dates

Description:

    Calculate holidays observed between start date and end date.

    Parameters
    ----------
    start_date : starting date, datetime-like, optional
    end_date : ending date, datetime-like, optional
    return_name : bool, optional, default=False
        If True, return a series that has dates and holiday names.
        False will only return dates.
Please provide a description of the function:

    def _reference_dates(self, start_date, end_date):
        if self.start_date is not None:
            start_date = self.start_date.tz_localize(start_date.tz)

        if self.end_date is not None:
            end_date = self.end_date.tz_localize(start_date.tz)

        year_offset = DateOffset(years=1)
        reference_start_date = Timestamp(
            datetime(start_date.year - 1, self.month, self.day))

        reference_end_date = Timestamp(
            datetime(end_date.year + 1, self.month, self.day))
        # Don't process unnecessary holidays
        dates = date_range(start=reference_start_date,
                           end=reference_end_date,
                           freq=year_offset, tz=start_date.tz)

        return dates

Description:

    Get reference dates for the holiday.

    Return reference dates for the holiday also returning the year
    prior to the start_date and year following the end_date. This ensures
    that any offsets to be applied will yield the holidays within
    the passed in dates.
Please provide a description of the function:

    def _apply_rule(self, dates):
        if self.observance is not None:
            return dates.map(lambda d: self.observance(d))

        if self.offset is not None:
            if not isinstance(self.offset, list):
                offsets = [self.offset]
            else:
                offsets = self.offset
            for offset in offsets:

                # if we are adding a non-vectorized value
                # ignore the PerformanceWarnings:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", PerformanceWarning)
                    dates += offset
        return dates

Description:

    Apply the given offset/observance to a DatetimeIndex of dates.

    Parameters
    ----------
    dates : DatetimeIndex
        Dates to apply the given offset/observance rule

    Returns
    -------
    Dates with rules applied
Please provide a description of the function:

    def holidays(self, start=None, end=None, return_name=False):
        if self.rules is None:
            raise Exception('Holiday Calendar {name} does not have any '
                            'rules specified'.format(name=self.name))

        if start is None:
            start = AbstractHolidayCalendar.start_date

        if end is None:
            end = AbstractHolidayCalendar.end_date

        start = Timestamp(start)
        end = Timestamp(end)

        holidays = None
        # If we don't have a cache or the dates are outside the prior
        # cache, we get them again
        if (self._cache is None or start < self._cache[0] or
                end > self._cache[1]):
            for rule in self.rules:
                rule_holidays = rule.dates(start, end, return_name=True)

                if holidays is None:
                    holidays = rule_holidays
                else:
                    holidays = holidays.append(rule_holidays)

            self._cache = (start, end, holidays.sort_index())

        holidays = self._cache[2]
        holidays = holidays[start:end]

        if return_name:
            return holidays
        else:
            return holidays.index

Description:

    Return the holidays between the start and end dates.

    Parameters
    ----------
    start : starting date, datetime-like, optional
    end : ending date, datetime-like, optional
    return_name : bool, optional
        If True, return a series that has dates and holiday names.
        False will only return a DatetimeIndex of dates.

    Returns
    -------
    DatetimeIndex of holidays
Please provide a description of the function:

    def merge_class(base, other):
        try:
            other = other.rules
        except AttributeError:
            pass

        if not isinstance(other, list):
            other = [other]
        other_holidays = {holiday.name: holiday for holiday in other}

        try:
            base = base.rules
        except AttributeError:
            pass

        if not isinstance(base, list):
            base = [base]
        base_holidays = {holiday.name: holiday for holiday in base}

        other_holidays.update(base_holidays)
        return list(other_holidays.values())

Description:

    Merge holiday calendars together. The base calendar
    will take precedence over other. The merge will be done
    based on each holiday's name.

    Parameters
    ----------
    base : AbstractHolidayCalendar
        instance/subclass or array of Holiday objects
    other : AbstractHolidayCalendar
        instance/subclass or array of Holiday objects
Please provide a description of the function:

    def merge(self, other, inplace=False):
        holidays = self.merge_class(self, other)
        if inplace:
            self.rules = holidays
        else:
            return holidays

Description:

    Merge holiday calendars together. The caller's class
    rules take precedence. The merge will be done
    based on each holiday's name.

    Parameters
    ----------
    other : holiday calendar
    inplace : bool (default=False)
        If True set the calendar's rules to the merged holidays,
        else return the array of Holidays.
Please provide a description of the function:

    def register_option(key, defval, doc='', validator=None, cb=None):
        import tokenize
        import keyword
        key = key.lower()

        if key in _registered_options:
            msg = "Option '{key}' has already been registered"
            raise OptionError(msg.format(key=key))
        if key in _reserved_keys:
            msg = "Option '{key}' is a reserved key"
            raise OptionError(msg.format(key=key))

        # the default value should be legal
        if validator:
            validator(defval)

        # walk the nested dict, creating dicts as needed along the path
        path = key.split('.')

        for k in path:
            if not bool(re.match('^' + tokenize.Name + '$', k)):
                raise ValueError("{k} is not a valid identifier".format(k=k))
            if keyword.iskeyword(k):
                raise ValueError("{k} is a python keyword".format(k=k))

        cursor = _global_config
        msg = "Path prefix to option '{option}' is already an option"
        for i, p in enumerate(path[:-1]):
            if not isinstance(cursor, dict):
                raise OptionError(msg.format(option='.'.join(path[:i])))
            if p not in cursor:
                cursor[p] = {}
            cursor = cursor[p]

        if not isinstance(cursor, dict):
            raise OptionError(msg.format(option='.'.join(path[:-1])))

        cursor[path[-1]] = defval  # initialize

        # save the option metadata
        _registered_options[key] = RegisteredOption(key=key, defval=defval,
                                                    doc=doc,
                                                    validator=validator,
                                                    cb=cb)

Description:

    Register an option in the package-wide pandas config object.

    Parameters
    ----------
    key - a fully-qualified key, e.g. "x.y.z.option"
    defval - the default value of the option
    doc - a string description of the option
    validator - a function of a single argument, should raise `ValueError`
        if called with a value which is not a legal value for the option.
    cb - a function of a single argument "key", which is called
        immediately after an option value is set/reset. key is
        the full name of the option.

    Returns
    -------
    Nothing.

    Raises
    ------
    ValueError if `validator` is specified and `defval` is not a valid value.
Please provide a description of the function:

    def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
        key = key.lower()

        if key in _deprecated_options:
            msg = "Option '{key}' has already been defined as deprecated."
            raise OptionError(msg.format(key=key))

        _deprecated_options[key] = DeprecatedOption(key, msg, rkey,
                                                    removal_ver)

Description:

    Mark option `key` as deprecated. If code attempts to access this option,
    a warning will be produced, using `msg` if given, or a default message
    if not.
    If `rkey` is given, any access to the key will be re-routed to `rkey`.

    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequent access will fail as usual, after the
    deprecation warning is given.

    Parameters
    ----------
    key - the name of the option to be deprecated. Must be a fully-qualified
        option name (e.g. "x.y.z.rkey").

    msg - (Optional) a warning message to output when the key is referenced.
        If no message is given a default message will be emitted.

    rkey - (Optional) the name of an option to reroute access to.
        If specified, any referenced `key` will be re-routed to `rkey`
        including set/get/reset.
        rkey must be a fully-qualified option name (e.g. "x.y.z.rkey").
        Used by the default message if no `msg` is specified.

    removal_ver - (Optional) specifies the version in which this option will
        be removed. Used by the default message if no `msg` is specified.

    Returns
    -------
    Nothing

    Raises
    ------
    OptionError - if key has already been deprecated.
Please provide a description of the function:

    def _select_options(pat):
        # short-circuit for exact key
        if pat in _registered_options:
            return [pat]

        # else look through all of them
        keys = sorted(_registered_options.keys())
        if pat == 'all':  # reserved key
            return keys

        return [k for k in keys if re.search(pat, k, re.I)]

Description:

    Returns a list of keys matching `pat`.

    If pat == "all", returns all registered options.
Please provide a description of the function:

    def _translate_key(key):
        d = _get_deprecated_option(key)
        if d:
            return d.rkey or key
        else:
            return key

Description:

    If key is deprecated and a replacement key is defined, return the
    replacement key; otherwise return `key` as-is.
Please provide a description of the function:

    def _build_option_description(k):
        o = _get_registered_option(k)
        d = _get_deprecated_option(k)

        s = '{k} '.format(k=k)

        if o.doc:
            s += '\n'.join(o.doc.strip().split('\n'))
        else:
            s += 'No description available.'

        if o:
            s += ('\n    [default: {default}] [currently: {current}]'
                  .format(default=o.defval, current=_get_option(k, True)))

        if d:
            s += '\n    (Deprecated'
            s += (', use `{rkey}` instead.'
                  .format(rkey=d.rkey if d.rkey else ''))
            s += ')'

        return s

Description:

    Builds and returns a formatted description of a registered option.
Please provide a description of the function:

    @contextmanager  # from contextlib, per the source module
    def config_prefix(prefix):
        # Note: reset_option relies on set_option, and on key directly
        # it does not fit in to this monkey-patching scheme

        global register_option, get_option, set_option, reset_option

        def wrap(func):
            def inner(key, *args, **kwds):
                pkey = '{prefix}.{key}'.format(prefix=prefix, key=key)
                return func(pkey, *args, **kwds)

            return inner

        _register_option = register_option
        _get_option = get_option
        _set_option = set_option
        set_option = wrap(set_option)
        get_option = wrap(get_option)
        register_option = wrap(register_option)
        yield None
        set_option = _set_option
        get_option = _get_option
        register_option = _register_option

Description:

    contextmanager for multiple invocations of API with a common prefix

    supported API functions: register_option, get_option, set_option

    Warning: This is not thread-safe, and won't work properly if you import
    the API functions into your module using the "from x import y" construct.

    Example:

        import pandas._config.config as cf
        with cf.config_prefix("display.font"):
            cf.register_option("color", "red")
            cf.register_option("size", " 5 pt")
            cf.set_option("size", " 6 pt")
            cf.get_option("size")
            ...

    will register options "display.font.color", "display.font.size", set the
    value of "display.font.size"... and so on.
Please provide a description of the function:

    def parse(self, declarations_str):
        for decl in declarations_str.split(';'):
            if not decl.strip():
                continue
            prop, sep, val = decl.partition(':')
            prop = prop.strip().lower()
            # TODO: don't lowercase case sensitive parts of values (strings)
            val = val.strip().lower()
            if sep:
                yield prop, val
            else:
                warnings.warn('Ill-formatted attribute: expected a colon '
                              'in {decl!r}'.format(decl=decl), CSSWarning)

Description:

    Generates (prop, value) pairs from declarations.

    In a future version may generate parsed tokens from tinycss/tinycss2.
Please provide a description of the function:

    def array(data: Sequence[object],
              dtype: Optional[Union[str, np.dtype, ExtensionDtype]] = None,
              copy: bool = True,
              ) -> ABCExtensionArray:
        from pandas.core.arrays import (
            period_array, ExtensionArray, IntervalArray, PandasArray,
            DatetimeArray, TimedeltaArray,
        )
        from pandas.core.internals.arrays import extract_array

        if lib.is_scalar(data):
            msg = (
                "Cannot pass scalar '{}' to 'pandas.array'."
            )
            raise ValueError(msg.format(data))

        data = extract_array(data, extract_numpy=True)

        if dtype is None and isinstance(data, ExtensionArray):
            dtype = data.dtype

        # this returns None for not-found dtypes.
        if isinstance(dtype, str):
            dtype = registry.find(dtype) or dtype

        if is_extension_array_dtype(dtype):
            cls = dtype.construct_array_type()
            return cls._from_sequence(data, dtype=dtype, copy=copy)

        if dtype is None:
            inferred_dtype = lib.infer_dtype(data, skipna=False)
            if inferred_dtype == 'period':
                try:
                    return period_array(data, copy=copy)
                except tslibs.IncompatibleFrequency:
                    # We may have a mixture of frequencies.
                    # We choose to return an ndarray, rather than raising.
                    pass
            elif inferred_dtype == 'interval':
                try:
                    return IntervalArray(data, copy=copy)
                except ValueError:
                    # We may have a mixture of `closed` here.
                    # We choose to return an ndarray, rather than raising.
                    pass
            elif inferred_dtype.startswith('datetime'):
                # datetime, datetime64
                try:
                    return DatetimeArray._from_sequence(data, copy=copy)
                except ValueError:
                    # Mixture of timezones, fall back to PandasArray
                    pass
            elif inferred_dtype.startswith('timedelta'):
                # timedelta, timedelta64
                return TimedeltaArray._from_sequence(data, copy=copy)

            # TODO(BooleanArray): handle this type

        # Pandas overrides NumPy for
        # 1. datetime64[ns]
        # 2. timedelta64[ns]
        # so that a DatetimeArray is returned.
        if is_datetime64_ns_dtype(dtype):
            return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy)
        elif is_timedelta64_ns_dtype(dtype):
            return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy)

        result = PandasArray._from_sequence(data, dtype=dtype, copy=copy)
        return result

Description:

    Create an array.

    .. versionadded:: 0.24.0

    Parameters
    ----------
    data : Sequence of objects
        The scalars inside `data` should be instances of the
        scalar type for `dtype`. It's expected that `data`
        represents a 1-dimensional array of data.

        When `data` is an Index or Series, the underlying array
        will be extracted from `data`.

    dtype : str, np.dtype, or ExtensionDtype, optional
        The dtype to use for the array. This may be a NumPy
        dtype or an extension type registered with pandas using
        :meth:`pandas.api.extensions.register_extension_dtype`.

        If not specified, there are two possibilities:

        1. When `data` is a :class:`Series`, :class:`Index`, or
           :class:`ExtensionArray`, the `dtype` will be taken
           from the data.
        2. Otherwise, pandas will attempt to infer the `dtype`
           from the data.

        Note that when `data` is a NumPy array, ``data.dtype`` is
        *not* used for inferring the array type. This is because
        NumPy cannot represent all the types of data that can be
        held in extension arrays.

        Currently, pandas will infer an extension dtype for sequences of

        ============================== =====================================
        Scalar Type                    Array Type
        ============================== =====================================
        :class:`pandas.Interval`       :class:`pandas.arrays.IntervalArray`
        :class:`pandas.Period`         :class:`pandas.arrays.PeriodArray`
        :class:`datetime.datetime`     :class:`pandas.arrays.DatetimeArray`
        :class:`datetime.timedelta`    :class:`pandas.arrays.TimedeltaArray`
        ============================== =====================================

        For all other cases, NumPy's usual inference rules will be used.

    copy : bool, default True
        Whether to copy the data, even if not necessary. Depending
        on the type of `data`, creating the new array may require
        copying data, even if ``copy=False``.

    Returns
    -------
    ExtensionArray
        The newly created array.

    Raises
    ------
    ValueError
        When `data` is not 1-dimensional.

    See Also
    --------
    numpy.array : Construct a NumPy array.
    Series : Construct a pandas Series.
    Index : Construct a pandas Index.
    arrays.PandasArray : ExtensionArray wrapping a NumPy array.
    Series.array : Extract the array stored within a Series.

    Notes
    -----
    Omitting the `dtype` argument means pandas will attempt to infer the
    best array type from the values in the data. As new array types are
    added by pandas and 3rd party libraries, the "best" array type may
    change. We recommend specifying `dtype` to ensure that

    1. the correct array type for the data is returned
    2. the returned array type doesn't change as new extension types
       are added by pandas and third-party libraries

    Additionally, if the underlying memory representation of the returned
    array matters, we recommend specifying the `dtype` as a concrete object
    rather than a string alias or allowing it to be inferred. For example,
    a future version of pandas or a 3rd-party library may include a
    dedicated ExtensionArray for string data. In this event, the following
    would no longer return a :class:`arrays.PandasArray` backed by a NumPy
    array.

    >>> pd.array(['a', 'b'], dtype=str)
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    This would instead return the new ExtensionArray dedicated for string
    data. If you really need the new array to be backed by a NumPy array,
    specify that in the dtype.

    >>> pd.array(['a', 'b'], dtype=np.dtype("<U1"))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Or use the dedicated constructor for the array you're expecting, and
    wrap that in a PandasArray

    >>> pd.array(np.array(['a', 'b'], dtype='<U1'))
    <PandasArray>
    ['a', 'b']
    Length: 2, dtype: str32

    Finally, Pandas has arrays that mostly overlap with NumPy

      * :class:`arrays.DatetimeArray`
      * :class:`arrays.TimedeltaArray`

    When data with a ``datetime64[ns]`` or ``timedelta64[ns]`` dtype is
    passed, pandas will always return a ``DatetimeArray`` or
    ``TimedeltaArray`` rather than a ``PandasArray``. This is for symmetry
    with the case of timezone-aware data, which NumPy does not natively
    support.

    >>> pd.array(['2015', '2016'], dtype='datetime64[ns]')
    <DatetimeArray>
    ['2015-01-01 00:00:00', '2016-01-01 00:00:00']
    Length: 2, dtype: datetime64[ns]

    >>> pd.array(["1H", "2H"], dtype='timedelta64[ns]')
    <TimedeltaArray>
    ['01:00:00', '02:00:00']
    Length: 2, dtype: timedelta64[ns]

    Examples
    --------
    If a dtype is not specified, `data` is passed through to
    :meth:`numpy.array`, and a :class:`arrays.PandasArray` is returned.

    >>> pd.array([1, 2])
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int64

    Or the NumPy dtype can be specified

    >>> pd.array([1, 2], dtype=np.dtype("int32"))
    <PandasArray>
    [1, 2]
    Length: 2, dtype: int32

    You can use the string alias for `dtype`

    >>> pd.array(['a', 'b', 'a'], dtype='category')
    [a, b, a]
    Categories (2, object): [a, b]

    Or specify the actual dtype

    >>> pd.array(['a', 'b', 'a'],
    ...          dtype=pd.CategoricalDtype(['a', 'b', 'c'], ordered=True))
    [a, b, a]
    Categories (3, object): [a < b < c]

    Because omitting the `dtype` passes the data through to NumPy,
    a mixture of valid integers and NA will return a floating-point
    NumPy array.

    >>> pd.array([1, 2, np.nan])
    <PandasArray>
    [1.0, 2.0, nan]
    Length: 3, dtype: float64

    To use pandas' nullable :class:`pandas.arrays.IntegerArray`, specify
    the dtype:

    >>> pd.array([1, 2, np.nan], dtype='Int64')
    <IntegerArray>
    [1, 2, NaN]
    Length: 3, dtype: Int64

    Pandas will infer an ExtensionArray for some types of data:

    >>> pd.array([pd.Period('2000', freq="D"), pd.Period("2000", freq="D")])
    <PeriodArray>
    ['2000-01-01', '2000-01-01']
    Length: 2, dtype: period[D]

    `data` must be 1-dimensional. A ValueError is raised when the input
    has the wrong dimensionality.

    >>> pd.array(1)
    Traceback (most recent call last):
      ...
    ValueError: Cannot pass scalar '1' to 'pandas.array'.
Please provide a description of the function:

    def maybe_convert_platform_interval(values):
        if isinstance(values, (list, tuple)) and len(values) == 0:
            # GH 19016
            # empty lists/tuples get object dtype by default, but this is
            # prohibited for IntervalArray, so coerce to integer instead
            return np.array([], dtype=np.int64)
        elif is_categorical_dtype(values):
            values = np.asarray(values)

        return maybe_convert_platform(values)

Description:

    Try to do platform conversion, with special casing for IntervalArray.
    Wrapper around maybe_convert_platform that alters the default return
    dtype in certain cases to be compatible with IntervalArray. For example,
    empty lists return with integer dtype instead of object dtype, which is
    prohibited for IntervalArray.

    Parameters
    ----------
    values : array-like

    Returns
    -------
    array
Please provide a description of the function:

    def is_file_like(obj):
        if not (hasattr(obj, 'read') or hasattr(obj, 'write')):
            return False

        if not hasattr(obj, "__iter__"):
            return False

        return True

Description:

    Check if the object is a file-like object.

    For objects to be considered file-like, they must
    be iterable AND have either a `read` and/or `write`
    method as an attribute.

    Note: file-like objects must be iterable, but
    iterable objects need not be file-like.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_file_like : bool
        Whether `obj` has file-like properties.

    Examples
    --------
    >>> buffer = StringIO("data")
    >>> is_file_like(buffer)
    True
    >>> is_file_like([1, 2, 3])
    False
Please provide a description of the function:

    def is_list_like(obj, allow_sets=True):
        return (isinstance(obj, abc.Iterable) and
                # we do not count strings/unicode/bytes as list-like
                not isinstance(obj, (str, bytes)) and

                # exclude zero-dimensional numpy arrays, effectively scalars
                not (isinstance(obj, np.ndarray) and obj.ndim == 0) and

                # exclude sets if allow_sets is False
                not (allow_sets is False and isinstance(obj, abc.Set)))

Description:

    Check if the object is list-like.

    Objects that are considered list-like are for example Python
    lists, tuples, sets, NumPy arrays, and Pandas Series.

    Strings and datetime objects, however, are not considered list-like.

    Parameters
    ----------
    obj : The object to check
    allow_sets : boolean, default True
        If this parameter is False, sets will not be considered list-like

        .. versionadded:: 0.24.0

    Returns
    -------
    is_list_like : bool
        Whether `obj` has list-like properties.

    Examples
    --------
    >>> is_list_like([1, 2, 3])
    True
    >>> is_list_like({1, 2, 3})
    True
    >>> is_list_like(datetime(2017, 1, 1))
    False
    >>> is_list_like("foo")
    False
    >>> is_list_like(1)
    False
    >>> is_list_like(np.array([2]))
    True
    >>> is_list_like(np.array(2))
    False
Please provide a description of the function:

    def is_nested_list_like(obj):
        return (is_list_like(obj) and hasattr(obj, '__len__') and
                len(obj) > 0 and all(is_list_like(item) for item in obj))

Description:

    Check if the object is list-like, and that all of its elements
    are also list-like.

    .. versionadded:: 0.20.0

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_nested_list_like : bool
        Whether `obj` has list-like properties.

    Examples
    --------
    >>> is_nested_list_like([[1, 2, 3]])
    True
    >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
    True
    >>> is_nested_list_like(["foo"])
    False
    >>> is_nested_list_like([])
    False
    >>> is_nested_list_like([[1, 2, 3], 1])
    False

    Notes
    -----
    This won't reliably detect whether a consumable iterator (e.g.
    a generator) is a nested-list-like without consuming the iterator.
    To avoid consuming it, we always return False if the outer container
    doesn't define `__len__`.

    See Also
    --------
    is_list_like
Please provide a description of the function:

    def is_dict_like(obj):
        dict_like_attrs = ("__getitem__", "keys", "__contains__")
        return (all(hasattr(obj, attr) for attr in dict_like_attrs)
                # [GH 25196] exclude classes
                and not isinstance(obj, type))

Description:

    Check if the object is dict-like.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_dict_like : bool
        Whether `obj` has dict-like properties.

    Examples
    --------
    >>> is_dict_like({1: 2})
    True
    >>> is_dict_like([1, 2, 3])
    False
    >>> is_dict_like(dict)
    False
    >>> is_dict_like(dict())
    True
Please provide a description of the function:

    def is_sequence(obj):
        try:
            iter(obj)  # Can iterate over it.
            len(obj)   # Has a length associated with it.
            return not isinstance(obj, (str, bytes))
        except (TypeError, AttributeError):
            return False

Description:

    Check if the object is a sequence of objects.
    String types are not included as sequences here.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_sequence : bool
        Whether `obj` is a sequence of objects.

    Examples
    --------
    >>> l = [1, 2, 3]
    >>> is_sequence(l)
    True
    >>> is_sequence(iter(l))
    False
Please provide a description of the function:

    def _new_DatetimeIndex(cls, d):
        if "data" in d and not isinstance(d["data"], DatetimeIndex):
            # Avoid need to verify integrity by calling simple_new directly
            data = d.pop("data")
            result = cls._simple_new(data, **d)
        else:
            with warnings.catch_warnings():
                # we ignore warnings from passing verify_integrity=False
                # TODO: If we knew what was going in to **d, we might be
                # able to go through _simple_new instead
                warnings.simplefilter("ignore")
                result = cls.__new__(cls, verify_integrity=False, **d)

        return result

Description:

    This is called upon unpickling, rather than the default, which doesn't
    have arguments and breaks __new__.
Please provide a description of the function:

    def date_range(start=None, end=None, periods=None, freq=None, tz=None,
                   normalize=False, name=None, closed=None, **kwargs):
        if freq is None and com._any_none(periods, start, end):
            freq = 'D'

        dtarr = DatetimeArray._generate_range(
            start=start, end=end, periods=periods,
            freq=freq, tz=tz, normalize=normalize,
            closed=closed, **kwargs)
        return DatetimeIndex._simple_new(
            dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name)

Description:

    Return a fixed frequency DatetimeIndex.

    Parameters
    ----------
    start : str or datetime-like, optional
        Left bound for generating dates.
    end : str or datetime-like, optional
        Right bound for generating dates.
    periods : integer, optional
        Number of periods to generate.
    freq : str or DateOffset, default 'D'
        Frequency strings can have multiples, e.g. '5H'. See
        :ref:`here <timeseries.offset_aliases>` for a list of
        frequency aliases.
    tz : str or tzinfo, optional
        Time zone name for returning localized DatetimeIndex, for example
        'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
        timezone-naive.
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range.
    name : str, default None
        Name of the resulting DatetimeIndex.
    closed : {None, 'left', 'right'}, optional
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None, the default).
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    rng : DatetimeIndex

    See Also
    --------
    DatetimeIndex : An immutable container for datetimes.
    timedelta_range : Return a fixed frequency TimedeltaIndex.
    period_range : Return a fixed frequency PeriodIndex.
    interval_range : Return a fixed frequency IntervalIndex.

    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``DatetimeIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Examples
    --------
    **Specifying the values**

    The next four examples generate the same `DatetimeIndex`, but vary
    the combination of `start`, `end` and `periods`.

    Specify `start` and `end`, with the default daily frequency.

    >>> pd.date_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')

    Specify `start` and `periods`, the number of periods (days).

    >>> pd.date_range(start='1/1/2018', periods=8)
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
                  dtype='datetime64[ns]', freq='D')

    Specify `end` and `periods`, the number of periods (days).

    >>> pd.date_range(end='1/1/2018', periods=8)
    DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
                   '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
                  dtype='datetime64[ns]', freq='D')

    Specify `start`, `end`, and `periods`; the frequency is generated
    automatically (linearly spaced).

    >>> pd.date_range(start='2018-04-24', end='2018-04-27', periods=3)
    DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
                   '2018-04-27 00:00:00'],
                  dtype='datetime64[ns]', freq=None)

    **Other Parameters**

    Changed the `freq` (frequency) to ``'M'`` (month end frequency).

    >>> pd.date_range(start='1/1/2018', periods=5, freq='M')
    DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
                   '2018-05-31'],
                  dtype='datetime64[ns]', freq='M')

    Multiples are allowed

    >>> pd.date_range(start='1/1/2018', periods=5, freq='3M')
    DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
                   '2019-01-31'],
                  dtype='datetime64[ns]', freq='3M')

    `freq` can also be specified as an Offset object.

    >>> pd.date_range(start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3))
    DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
                   '2019-01-31'],
                  dtype='datetime64[ns]', freq='3M')

    Specify `tz` to set the timezone.

    >>> pd.date_range(start='1/1/2018', periods=5, tz='Asia/Tokyo')
    DatetimeIndex(['2018-01-01 00:00:00+09:00', '2018-01-02 00:00:00+09:00',
                   '2018-01-03 00:00:00+09:00', '2018-01-04 00:00:00+09:00',
                   '2018-01-05 00:00:00+09:00'],
                  dtype='datetime64[ns, Asia/Tokyo]', freq='D')

    `closed` controls whether to include `start` and `end` that are on the
    boundary. The default includes boundary points on either end.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed=None)
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
                  dtype='datetime64[ns]', freq='D')

    Use ``closed='left'`` to exclude `end` if it falls on the boundary.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='left')
    DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'],
                  dtype='datetime64[ns]', freq='D')

    Use ``closed='right'`` to exclude `start` if it falls on the boundary.

    >>> pd.date_range(start='2017-01-01', end='2017-01-04', closed='right')
    DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'],
                  dtype='datetime64[ns]', freq='D')
Please provide a description of the function:

    def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
                    normalize=True, name=None, weekmask=None, holidays=None,
                    closed=None, **kwargs):
        if freq is None:
            msg = ('freq must be specified for bdate_range; '
                   'use date_range instead')
            raise TypeError(msg)

        if is_string_like(freq) and freq.startswith('C'):
            try:
                weekmask = weekmask or 'Mon Tue Wed Thu Fri'
                freq = prefix_mapping[freq](holidays=holidays,
                                            weekmask=weekmask)
            except (KeyError, TypeError):
                msg = 'invalid custom frequency string: {freq}'.format(
                    freq=freq)
                raise ValueError(msg)
        elif holidays or weekmask:
            msg = ('a custom frequency string is required when holidays or '
                   'weekmask are passed, got frequency {freq}').format(
                       freq=freq)
            raise ValueError(msg)

        return date_range(start=start, end=end, periods=periods,
                          freq=freq, tz=tz, normalize=normalize, name=name,
                          closed=closed, **kwargs)

Description:

    Return a fixed frequency DatetimeIndex, with business day as the default
    frequency.

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates.
    end : string or datetime-like, default None
        Right bound for generating dates.
    periods : integer, default None
        Number of periods to generate.
    freq : string or DateOffset, default 'B' (business daily)
        Frequency strings can have multiples, e.g. '5H'.
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Hong_Kong.
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range.
    name : string, default None
        Name of the resulting DatetimeIndex.
    weekmask : string or None, default None
        Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
        only used when custom frequency strings are passed. The default
        value None is equivalent to 'Mon Tue Wed Thu Fri'.

        .. versionadded:: 0.21.0

    holidays : list-like or None, default None
        Dates to exclude from the set of valid business days, passed to
        ``numpy.busdaycalendar``, only used when custom frequency strings
        are passed.

        .. versionadded:: 0.21.0

    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None).
    **kwargs
        For compatibility. Has no effect on the result.

    Returns
    -------
    DatetimeIndex

    Notes
    -----
    Of the four parameters: ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. Specifying ``freq`` is a requirement
    for ``bdate_range``. Use ``date_range`` if specifying ``freq`` is not
    desired.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Examples
    --------
    Note how the two weekend days are skipped in the result.

    >>> pd.bdate_range(start='1/1/2018', end='1/08/2018')
    DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
                   '2018-01-05', '2018-01-08'],
                  dtype='datetime64[ns]', freq='B')
Please provide a description of the function:

    def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
                    normalize=True, name=None, closed=None, **kwargs):
        warnings.warn("cdate_range is deprecated and will be removed in a "
                      "future version, instead use "
                      "pd.bdate_range(..., freq='{freq}')"
                      .format(freq=freq), FutureWarning, stacklevel=2)

        if freq == 'C':
            holidays = kwargs.pop('holidays', [])
            weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
            freq = CDay(holidays=holidays, weekmask=weekmask)

        return date_range(start=start, end=end, periods=periods, freq=freq,
                          tz=tz, normalize=normalize, name=name,
                          closed=closed, **kwargs)

Description:

    Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the
    default frequency.

    .. deprecated:: 0.21.0

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, default 'C' (CustomBusinessDay)
        Frequency strings can have multiples, e.g. '5H'
    tz : string, default None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Hong_Kong
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : string, default None
        Name of the resulting DatetimeIndex
    weekmask : string, default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    Of the three parameters: ``start``, ``end``, and ``periods``, exactly
    two must be specified.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Returns
    -------
    rng : DatetimeIndex
Please provide a description of the function:

    def _create_blocks(self):
        obj, index = self._convert_freq()
        if index is not None:
            index = self._on

        # filter out the on from the object
        if self.on is not None:
            if obj.ndim == 2:
                obj = obj.reindex(
                    columns=obj.columns.difference([self.on]), copy=False)
        blocks = obj._to_dict_of_blocks(copy=False).values()

        return blocks, obj, index

Description:

    Split data into blocks & return conformed data.
Please provide a description of the function:

    def _gotitem(self, key, ndim, subset=None):
        # create a new object to prevent aliasing
        if subset is None:
            subset = self.obj
        self = self._shallow_copy(subset)
        self._reset_cache()
        if subset.ndim == 2:
            if is_scalar(key) and key in subset or is_list_like(key):
                self._selection = key
        return self

Description:

    Sub-classes to define. Return a sliced object.

    Parameters
    ----------
    key : str / list of selections
    ndim : 1,2
        requested ndim of result
    subset : object, default None
        subset to act on
Please provide a description of the function:

    def _get_index(self, index=None):
        if self.is_freq_type:
            if index is None:
                index = self._on
            return index, index.asi8
        return index, index

Description:

    Return index as ndarrays.

    Returns
    -------
    tuple of (index, index_as_ndarray)
Please provide a description of the function:

    def _wrap_result(self, result, block=None, obj=None):
        if obj is None:
            obj = self._selected_obj
        index = obj.index

        if isinstance(result, np.ndarray):

            # coerce if necessary
            if block is not None:
                if is_timedelta64_dtype(block.values.dtype):
                    from pandas import to_timedelta
                    result = to_timedelta(
                        result.ravel(),
                        unit='ns').values.reshape(result.shape)

            if result.ndim == 1:
                from pandas import Series
                return Series(result, index, name=obj.name)

            return type(obj)(result, index=index, columns=block.columns)
        return result

Description:

    Wrap a single result.
Please provide a description of the function:

    def _wrap_results(self, results, blocks, obj):
        from pandas import Series, concat
        from pandas.core.index import ensure_index

        final = []
        for result, block in zip(results, blocks):

            result = self._wrap_result(result, block=block, obj=obj)
            if result.ndim == 1:
                return result
            final.append(result)

        # if we have an 'on' column
        # we want to put it back into the results
        # in the same location
        columns = self._selected_obj.columns
        if self.on is not None and not self._on.equals(obj.index):

            name = self._on.name
            final.append(Series(self._on, index=obj.index, name=name))

            if self._selection is not None:

                selection = ensure_index(self._selection)

                # need to reorder to include original location of
                # the on column (if its not already there)
                if name not in selection:
                    columns = self.obj.columns
                    indexer = columns.get_indexer(selection.tolist() + [name])
                    columns = columns.take(sorted(indexer))

        if not len(final):
            return obj.astype('float64')
        return concat(final, axis=1).reindex(columns=columns, copy=False)

Description:

    Wrap the results.

    Parameters
    ----------
    results : list of ndarrays
    blocks : list of blocks
    obj : conformed data (may be resampled)
Please provide a description of the function:

    def _center_window(self, result, window):
        if self.axis > result.ndim - 1:
            raise ValueError("Requested axis is larger than the number of "
                             "argument dimensions")

        offset = _offset(window, True)
        if offset > 0:
            if isinstance(result, (ABCSeries, ABCDataFrame)):
                result = result.slice_shift(-offset, axis=self.axis)
            else:
                lead_indexer = [slice(None)] * result.ndim
                lead_indexer[self.axis] = slice(offset, None)
                result = np.copy(result[tuple(lead_indexer)])
        return result

Description:

    Center the result in the window.
Please provide a description of the function:

    def _prep_window(self, **kwargs):
        window = self._get_window()
        if isinstance(window, (list, tuple, np.ndarray)):
            return com.asarray_tuplesafe(window).astype(float)
        elif is_integer(window):
            import scipy.signal as sig

            # the below may pop from kwargs
            def _validate_win_type(win_type, kwargs):
                arg_map = {'kaiser': ['beta'],
                           'gaussian': ['std'],
                           'general_gaussian': ['power', 'width'],
                           'slepian': ['width']}
                if win_type in arg_map:
                    return tuple([win_type] + _pop_args(win_type,
                                                        arg_map[win_type],
                                                        kwargs))
                return win_type

            def _pop_args(win_type, arg_names, kwargs):
                msg = '%s window requires %%s' % win_type
                all_args = []
                for n in arg_names:
                    if n not in kwargs:
                        raise ValueError(msg % n)
                    all_args.append(kwargs.pop(n))
                return all_args

            win_type = _validate_win_type(self.win_type, kwargs)
            # GH #15662. `False` makes symmetric window, rather than
            # periodic.
            return sig.get_window(win_type, window, False).astype(float)

Description:

    Provide validation for the window type; return the window
    once it has been validated.
Please provide a description of the function:

    def _apply_window(self, mean=True, **kwargs):
        window = self._prep_window(**kwargs)
        center = self.center

        blocks, obj, index = self._create_blocks()
        results = []
        for b in blocks:
            try:
                values = self._prep_values(b.values)
            except TypeError:
                results.append(b.values.copy())
                continue

            if values.size == 0:
                results.append(values.copy())
                continue

            offset = _offset(window, center)
            additional_nans = np.array([np.NaN] * offset)

            def f(arg, *args, **kwargs):
                minp = _use_window(self.min_periods, len(window))
                return libwindow.roll_window(
                    np.concatenate((arg, additional_nans))
                    if center else arg, window, minp, avg=mean)

            result = np.apply_along_axis(f, self.axis, values)

            if center:
                result = self._center_window(result, window)
            results.append(result)

        return self._wrap_results(results, blocks, obj)

Description:

    Applies a moving window of type ``window_type`` on the data.

    Parameters
    ----------
    mean : bool, default True
        If True computes weighted mean, else weighted sum

    Returns
    -------
    y : same type as input argument
Please provide a description of the function:

    def _apply(self, func, name, window=None, center=None,
               check_minp=None, **kwargs):
        def f(x, name=name, *args):
            x = self._shallow_copy(x)

            if isinstance(name, str):
                return getattr(x, name)(*args, **kwargs)

            return x.apply(name, *args, **kwargs)

        return self._groupby.apply(f)

Description:

    Dispatch to apply; we are stripping all of the _apply kwargs and
    performing the original function call on the grouped object.
Please provide a description of the function:

    def _apply(self, func, name=None, window=None, center=None,
               check_minp=None, **kwargs):
        if center is None:
            center = self.center
        if window is None:
            window = self._get_window()

        if check_minp is None:
            check_minp = _use_window

        blocks, obj, index = self._create_blocks()
        index, indexi = self._get_index(index=index)
        results = []
        for b in blocks:
            values = self._prep_values(b.values)

            if values.size == 0:
                results.append(values.copy())
                continue

            # if we have a string function name, wrap it
            if isinstance(func, str):
                cfunc = getattr(libwindow, func, None)
                if cfunc is None:
                    raise ValueError("we do not support this function "
                                     "in libwindow.{func}".format(func=func))

                def func(arg, window, min_periods=None, closed=None):
                    minp = check_minp(min_periods, window)
                    # ensure we are only rolling on floats
                    arg = ensure_float64(arg)
                    return cfunc(arg, window, minp, indexi, closed, **kwargs)

            # calculation function
            if center:
                offset = _offset(window, center)
                additional_nans = np.array([np.NaN] * offset)

                def calc(x):
                    return func(np.concatenate((x, additional_nans)),
                                window, min_periods=self.min_periods,
                                closed=self.closed)
            else:

                def calc(x):
                    return func(x, window,
                                min_periods=self.min_periods,
                                closed=self.closed)

            with np.errstate(all='ignore'):
                if values.ndim > 1:
                    result = np.apply_along_axis(calc, self.axis, values)
                else:
                    result = calc(values)

            if center:
                result = self._center_window(result, window)

            results.append(result)

        return self._wrap_results(results, blocks, obj)

Description:

    Rolling statistical measure using supplied function.

    Designed to be used with passed-in Cython array-based functions.

    Parameters
    ----------
    func : str/callable to apply
    name : str, optional
        name of this function
    window : int/array, default to _get_window()
    center : bool, default to self.center
    check_minp : function, default to _use_window

    Returns
    -------
    y : type of input
Please provide a description of the function:

    def _validate_monotonic(self):
        if not self._on.is_monotonic:
            formatted = self.on or 'index'
            raise ValueError("{0} must be "
                             "monotonic".format(formatted))

Description:

    Validate that the on/index is monotonic.
Please provide a description of the function:

    def _validate_freq(self):
        from pandas.tseries.frequencies import to_offset
        try:
            return to_offset(self.window)
        except (TypeError, ValueError):
            raise ValueError("passed window {0} is not "
                             "compatible with a datetimelike "
                             "index".format(self.window))

Description:

    Validate & return window frequency.
Please provide a description of the function:

    def _get_window(self, other=None):
        axis = self.obj._get_axis(self.axis)
        length = len(axis) + (other is not None) * len(axis)

        other = self.min_periods or -1
        return max(length, other)

Description:

    Get the window length over which to perform some operation.

    Parameters
    ----------
    other : object, default None
        The other object that is involved in the operation.
        Such an object is involved for operations like covariance.

    Returns
    -------
    window : int
        The window length.
Please provide a description of the function:

    def _apply(self, func, **kwargs):
        blocks, obj, index = self._create_blocks()
        results = []
        for b in blocks:
            try:
                values = self._prep_values(b.values)
            except TypeError:
                results.append(b.values.copy())
                continue

            if values.size == 0:
                results.append(values.copy())
                continue

            # if we have a string function name, wrap it
            if isinstance(func, str):
                cfunc = getattr(libwindow, func, None)
                if cfunc is None:
                    raise ValueError("we do not support this function "
                                     "in libwindow.{func}".format(func=func))

                def func(arg):
                    return cfunc(arg, self.com, int(self.adjust),
                                 int(self.ignore_na), int(self.min_periods))

            results.append(np.apply_along_axis(func, self.axis, values))

        return self._wrap_results(results, blocks, obj)

Description:

    Rolling statistical measure using supplied function. Designed to be
    used with passed-in Cython array-based functions.

    Parameters
    ----------
    func : str/callable to apply

    Returns
    -------
    y : same type as input argument
Please provide a description of the function:

    def mean(self, *args, **kwargs):
        nv.validate_window_func('mean', args, kwargs)
        return self._apply('ewma', **kwargs)

Description:

    Exponential weighted moving average.

    Parameters
    ----------
    *args, **kwargs
        Arguments and keyword arguments to be passed into func.
Please provide a description of the function:

    def std(self, bias=False, *args, **kwargs):
        nv.validate_window_func('std', args, kwargs)
        return _zsqrt(self.var(bias=bias, **kwargs))

Description:

    Exponential weighted moving stddev.
Please provide a description of the function:

    def var(self, bias=False, *args, **kwargs):
        nv.validate_window_func('var', args, kwargs)

        def f(arg):
            return libwindow.ewmcov(arg, arg, self.com, int(self.adjust),
                                    int(self.ignore_na),
                                    int(self.min_periods), int(bias))

        return self._apply(f, **kwargs)

Description:

    Exponential weighted moving variance.
Please provide a description of the function:

    def cov(self, other=None, pairwise=None, bias=False, **kwargs):
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)

        def _get_cov(X, Y):
            X = self._shallow_copy(X)
            Y = self._shallow_copy(Y)
            cov = libwindow.ewmcov(X._prep_values(), Y._prep_values(),
                                   self.com, int(self.adjust),
                                   int(self.ignore_na),
                                   int(self.min_periods), int(bias))
            return X._wrap_result(cov)

        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_cov, pairwise=bool(pairwise))

Description:

    Exponential weighted sample covariance.
Please provide a description of the function:

    def corr(self, other=None, pairwise=None, **kwargs):
        if other is None:
            other = self._selected_obj
            # only default unset
            pairwise = True if pairwise is None else pairwise
        other = self._shallow_copy(other)

        def _get_corr(X, Y):
            X = self._shallow_copy(X)
            Y = self._shallow_copy(Y)

            def _cov(x, y):
                return libwindow.ewmcov(x, y, self.com, int(self.adjust),
                                        int(self.ignore_na),
                                        int(self.min_periods), 1)

            x_values = X._prep_values()
            y_values = Y._prep_values()
            with np.errstate(all='ignore'):
                cov = _cov(x_values, y_values)
                x_var = _cov(x_values, x_values)
                y_var = _cov(y_values, y_values)
                corr = cov / _zsqrt(x_var * y_var)
            return X._wrap_result(corr)

        return _flex_binary_moment(self._selected_obj, other._selected_obj,
                                   _get_corr, pairwise=bool(pairwise))

Description:

    Exponential weighted sample correlation.
Please provide a description of the function:

    def _ensure_like_indices(time, panels):
        n_time = len(time)
        n_panel = len(panels)
        u_panels = np.unique(panels)  # this sorts!
        u_time = np.unique(time)
        if len(u_time) == n_time:
            time = np.tile(u_time, len(u_panels))
        if len(u_panels) == n_panel:
            panels = np.repeat(u_panels, len(u_time))
        return time, panels

Description:

    Makes sure that time and panels are conformable.
Please provide a description of the function:

    def panel_index(time, panels, names=None):
        if names is None:
            names = ['time', 'panel']
        time, panels = _ensure_like_indices(time, panels)
        return MultiIndex.from_arrays([time, panels], sortorder=None,
                                      names=names)

Description:

    Returns a multi-index suitable for a panel-like DataFrame.

    Parameters
    ----------
    time : array-like
        Time index, does not have to repeat
    panels : array-like
        Panel index, does not have to repeat
    names : list, optional
        List containing the names of the indices

    Returns
    -------
    multi_index : MultiIndex
        Time index is the first level, the panels are the second level.

    Examples
    --------
    >>> years = range(1960, 1963)
    >>> panels = ['A', 'B', 'C']
    >>> panel_idx = panel_index(years, panels)
    >>> panel_idx
    MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
                (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
                (1962, 'C')], dtype=object)

    or

    >>> years = np.repeat(range(1960, 1963), 3)
    >>> panels = np.tile(['A', 'B', 'C'], 3)
    >>> panel_idx = panel_index(years, panels)
    >>> panel_idx
    MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
                (1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
                (1962, 'C')], dtype=object)
Please provide a description of the function:

    def _init_data(self, data, copy, dtype, **kwargs):
        if data is None:
            data = {}
        if dtype is not None:
            dtype = self._validate_dtype(dtype)

        passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]

        if kwargs:
            raise TypeError('_init_data() got an unexpected keyword '
                            'argument "{0}"'.format(list(kwargs.keys())[0]))

        axes = None
        if isinstance(data, BlockManager):
            if com._any_not_none(*passed_axes):
                axes = [x if x is not None else y
                        for x, y in zip(passed_axes, data.axes)]
            mgr = data
        elif isinstance(data, dict):
            mgr = self._init_dict(data, passed_axes, dtype=dtype)
            copy = False
            dtype = None
        elif isinstance(data, (np.ndarray, list)):
            mgr = self._init_matrix(data, passed_axes, dtype=dtype,
                                    copy=copy)
            copy = False
            dtype = None
        elif is_scalar(data) and com._all_not_none(*passed_axes):
            values = cast_scalar_to_array([len(x) for x in passed_axes],
                                          data, dtype=dtype)
            mgr = self._init_matrix(values, passed_axes, dtype=values.dtype,
                                    copy=False)
            copy = False
        else:  # pragma: no cover
            raise ValueError('Panel constructor not properly called!')

        NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)

Description:

    Generate ND initialization; axes are passed
    as required objects to __init__.
Please provide a description of the function:

    @classmethod  # per the source module; from_dict is called on the class
    def from_dict(cls, data, intersect=False, orient='items', dtype=None):
        from collections import defaultdict

        orient = orient.lower()
        if orient == 'minor':
            new_data = defaultdict(OrderedDict)
            for col, df in data.items():
                for item, s in df.items():
                    new_data[item][col] = s
            data = new_data
        elif orient != 'items':  # pragma: no cover
            raise ValueError('Orientation must be one of {items, minor}.')

        d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
        ks = list(d['data'].keys())
        if not isinstance(d['data'], OrderedDict):
            ks = list(sorted(ks))
        d[cls._info_axis_name] = Index(ks)
        return cls(**d)

Description:

    Construct Panel from dict of DataFrame objects.

    Parameters
    ----------
    data : dict
        {field : DataFrame}
    intersect : boolean
        Intersect indexes of input DataFrames
    orient : {'items', 'minor'}, default 'items'
        The "orientation" of the data. If the keys of the passed dict
        should be the items of the result panel, pass 'items'
        (default). Otherwise if the columns of the values of the passed
        DataFrame objects should be the items (which in the case of
        mixed-dtype data you should do), instead pass 'minor'
    dtype : dtype, default None
        Data type to force, otherwise infer

    Returns
    -------
    Panel
Please provide a description of the function:def _get_plane_axes_index(self, axis): axis_name = self._get_axis_name(axis) if axis_name == 'major_axis': index = 'minor_axis' columns = 'items' if axis_name == 'minor_axis': index = 'major_axis' columns = 'items' elif axis_name == 'items': index = 'major_axis' columns = 'minor_axis' return index, columns
[ "\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes indexes.\n " ]
Please provide a description of the function:def _get_plane_axes(self, axis): return [self._get_axis(axi) for axi in self._get_plane_axes_index(axis)]
[ "\n Get my plane axes indexes: these are already\n (as compared with higher level planes),\n as we are returning a DataFrame axes.\n " ]
Please provide a description of the function:def to_excel(self, path, na_rep='', engine=None, **kwargs): from pandas.io.excel import ExcelWriter if isinstance(path, str): writer = ExcelWriter(path, engine=engine) else: writer = path kwargs['na_rep'] = na_rep for item, df in self.iteritems(): name = str(item) df.to_excel(writer, name, **kwargs) writer.save()
[ "\n Write each DataFrame in Panel to a separate excel sheet.\n\n Parameters\n ----------\n path : string or ExcelWriter object\n File path or existing ExcelWriter\n na_rep : string, default ''\n Missing data representation\n engine : string, default None\n write engine to use - you can also set this via the options\n ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and\n ``io.excel.xlsm.writer``.\n\n Other Parameters\n ----------------\n float_format : string, default None\n Format string for floating point numbers\n cols : sequence, optional\n Columns to write\n header : boolean or list of string, default True\n Write out column names. If a list of string is given it is\n assumed to be aliases for the column names\n index : boolean, default True\n Write row names (index)\n index_label : string or sequence, default None\n Column label for index column(s) if desired. If None is given, and\n `header` and `index` are True, then the index names are used. A\n sequence should be given if the DataFrame uses MultiIndex.\n startrow : upper left cell row to dump data frame\n startcol : upper left cell column to dump data frame\n\n Notes\n -----\n Keyword arguments (and na_rep) are passed to the ``to_excel`` method\n for each DataFrame written.\n " ]
Please provide a description of the function:def get_value(self, *args, **kwargs): warnings.warn("get_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._get_value(*args, **kwargs)
[ "\n Quickly retrieve single value at (item, major, minor) location.\n\n .. deprecated:: 0.21.0\n\n Please use .at[] or .iat[] accessors.\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n value : scalar value\n " ]
Please provide a description of the function:def set_value(self, *args, **kwargs): warnings.warn("set_value is deprecated and will be removed " "in a future release. Please use " ".at[] or .iat[] accessors instead", FutureWarning, stacklevel=2) return self._set_value(*args, **kwargs)
[ "\n Quickly set single value at (item, major, minor) location.\n\n .. deprecated:: 0.21.0\n\n Please use .at[] or .iat[] accessors.\n\n Parameters\n ----------\n item : item label (panel item)\n major : major axis label (panel item row)\n minor : minor axis label (panel item column)\n value : scalar\n takeable : interpret the passed labels as indexers, default False\n\n Returns\n -------\n panel : Panel\n If label combo is contained, will be reference to calling Panel,\n otherwise a new object.\n " ]
Please provide a description of the function:def _unpickle_panel_compat(self, state): # pragma: no cover from pandas.io.pickle import _unpickle_array _unpickle = _unpickle_array vals, items, major, minor = state items = _unpickle(items) major = _unpickle(major) minor = _unpickle(minor) values = _unpickle(vals) wp = Panel(values, items, major, minor) self._data = wp._data
[ "\n Unpickle the panel.\n " ]
Please provide a description of the function:def conform(self, frame, axis='items'): axes = self._get_plane_axes(axis) return frame.reindex(**self._extract_axes_for_slice(self, axes))
[ "\n Conform input DataFrame to align with chosen axis pair.\n\n Parameters\n ----------\n frame : DataFrame\n axis : {'items', 'major', 'minor'}\n\n Axis the input corresponds to. E.g., if axis='major', then\n the frame's columns would be items, and the index would be\n values of the minor axis\n\n Returns\n -------\n DataFrame\n " ]
Please provide a description of the function:def round(self, decimals=0, *args, **kwargs): nv.validate_round(args, kwargs) if is_integer(decimals): result = np.apply_along_axis(np.round, 0, self.values) return self._wrap_result(result, axis=0) raise TypeError("decimals must be an integer")
[ "\n Round each value in Panel to a specified number of decimal places.\n\n .. versionadded:: 0.18.0\n\n Parameters\n ----------\n decimals : int\n Number of decimal places to round to (default: 0).\n If decimals is negative, it specifies the number of\n positions to the left of the decimal point.\n\n Returns\n -------\n Panel object\n\n See Also\n --------\n numpy.around\n " ]
Please provide a description of the function:def dropna(self, axis=0, how='any', inplace=False): axis = self._get_axis_number(axis) values = self.values mask = notna(values) for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})): mask = mask.sum(ax) per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:]) if how == 'all': cond = mask > 0 else: cond = mask == per_slice new_ax = self._get_axis(axis)[cond] result = self.reindex_axis(new_ax, axis=axis) if inplace: self._update_inplace(result) else: return result
[ "\n Drop 2D from panel, holding passed axis constant.\n\n Parameters\n ----------\n axis : int, default 0\n Axis to hold constant. E.g. axis=1 will drop major_axis entries\n having a certain amount of NA data\n how : {'all', 'any'}, default 'any'\n 'any': one or more values are NA in the DataFrame along the\n axis. For 'all' they all must be.\n inplace : bool, default False\n If True, do operation inplace and return None.\n\n Returns\n -------\n dropped : Panel\n " ]
Please provide a description of the function:def xs(self, key, axis=1): axis = self._get_axis_number(axis) if axis == 0: return self[key] self._consolidate_inplace() axis_number = self._get_axis_number(axis) new_data = self._data.xs(key, axis=axis_number, copy=False) result = self._construct_return_type(new_data) copy = new_data.is_mixed_type result._set_is_copy(self, copy=copy) return result
[ "\n Return slice of panel along selected axis.\n\n Parameters\n ----------\n key : object\n Label\n axis : {'items', 'major', 'minor}, default 1/'major'\n\n Returns\n -------\n y : ndim(self)-1\n\n Notes\n -----\n xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or\n levels and is a superset of xs functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n " ]
Please provide a description of the function:def _ixs(self, i, axis=0): ax = self._get_axis(axis) key = ax[i] # xs cannot handle a non-scalar key, so just reindex here # if we have a multi-index and a single tuple, then its a reduction # (GH 7516) if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)): if is_list_like(key): indexer = {self._get_axis_name(axis): key} return self.reindex(**indexer) # a reduction if axis == 0: values = self._data.iget(i) return self._box_item_values(key, values) # xs by position self._consolidate_inplace() new_data = self._data.xs(i, axis=axis, copy=True, takeable=True) return self._construct_return_type(new_data)
[ "\n Parameters\n ----------\n i : int, slice, or sequence of integers\n axis : int\n " ]
Please provide a description of the function:def to_frame(self, filter_observations=True): _, N, K = self.shape if filter_observations: # shaped like the return DataFrame mask = notna(self.values).all(axis=0) # size = mask.sum() selector = mask.ravel() else: # size = N * K selector = slice(None, None) data = {item: self[item].values.ravel()[selector] for item in self.items} def construct_multi_parts(idx, n_repeat, n_shuffle=1): # Replicates and shuffles MultiIndex, returns individual attributes codes = [np.repeat(x, n_repeat) for x in idx.codes] # Assumes that each label is divisible by n_shuffle codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes] codes = [x[selector] for x in codes] levels = idx.levels names = idx.names return codes, levels, names def construct_index_parts(idx, major=True): levels = [idx] if major: codes = [np.arange(N).repeat(K)[selector]] names = idx.name or 'major' else: codes = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)] codes = [codes.ravel()[selector]] names = idx.name or 'minor' names = [names] return codes, levels, names if isinstance(self.major_axis, MultiIndex): major_codes, major_levels, major_names = construct_multi_parts( self.major_axis, n_repeat=K) else: major_codes, major_levels, major_names = construct_index_parts( self.major_axis) if isinstance(self.minor_axis, MultiIndex): minor_codes, minor_levels, minor_names = construct_multi_parts( self.minor_axis, n_repeat=N, n_shuffle=K) else: minor_codes, minor_levels, minor_names = construct_index_parts( self.minor_axis, major=False) levels = major_levels + minor_levels codes = major_codes + minor_codes names = major_names + minor_names index = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False) return DataFrame(data, index=index, columns=self.items)
[ "\n Transform wide format into long (stacked) format as DataFrame whose\n columns are the Panel's items and whose index is a MultiIndex formed\n of the Panel's major and minor axes.\n\n Parameters\n ----------\n filter_observations : boolean, default True\n Drop (major, minor) pairs without a complete set of observations\n across all the items\n\n Returns\n -------\n y : DataFrame\n " ]
Please provide a description of the function:def apply(self, func, axis='major', **kwargs): if kwargs and not isinstance(func, np.ufunc): f = lambda x: func(x, **kwargs) else: f = func # 2d-slabs if isinstance(axis, (tuple, list)) and len(axis) == 2: return self._apply_2d(f, axis=axis) axis = self._get_axis_number(axis) # try ufunc like if isinstance(f, np.ufunc): try: with np.errstate(all='ignore'): result = np.apply_along_axis(func, axis, self.values) return self._wrap_result(result, axis=axis) except (AttributeError): pass # 1d return self._apply_1d(f, axis=axis)
[ "\n Apply function along axis (or axes) of the Panel.\n\n Parameters\n ----------\n func : function\n Function to apply to each combination of 'other' axes\n e.g. if axis = 'items', the combination of major_axis/minor_axis\n will each be passed as a Series; if axis = ('items', 'major'),\n DataFrames of items & major axis will be passed\n axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two\n axes\n **kwargs\n Additional keyword arguments will be passed to the function.\n\n Returns\n -------\n result : Panel, DataFrame, or Series\n\n Examples\n --------\n\n Returns a Panel with the square root of each element\n\n >>> p = pd.Panel(np.random.rand(4, 3, 2)) # doctest: +SKIP\n >>> p.apply(np.sqrt)\n\n Equivalent to p.sum(1), returning a DataFrame\n\n >>> p.apply(lambda x: x.sum(), axis=1) # doctest: +SKIP\n\n Equivalent to previous:\n\n >>> p.apply(lambda x: x.sum(), axis='major') # doctest: +SKIP\n\n Return the shapes of each DataFrame over axis 2 (i.e the shapes of\n items x major), as a Series\n\n >>> p.apply(lambda x: x.shape, axis=(0,1)) # doctest: +SKIP\n " ]
Please provide a description of the function:def _apply_2d(self, func, axis): ndim = self.ndim axis = [self._get_axis_number(a) for a in axis] # construct slabs, in 2-d this is a DataFrame result indexer_axis = list(range(ndim)) for a in axis: indexer_axis.remove(a) indexer_axis = indexer_axis[0] slicer = [slice(None, None)] * ndim ax = self._get_axis(indexer_axis) results = [] for i, e in enumerate(ax): slicer[indexer_axis] = i sliced = self.iloc[tuple(slicer)] obj = func(sliced) results.append((e, obj)) return self._construct_return_type(dict(results))
[ "\n Handle 2-d slices, equiv to iterating over the other axis.\n " ]
Please provide a description of the function:def _construct_return_type(self, result, axes=None): ndim = getattr(result, 'ndim', None) # need to assume they are the same if ndim is None: if isinstance(result, dict): ndim = getattr(list(result.values())[0], 'ndim', 0) # have a dict, so top-level is +1 dim if ndim != 0: ndim += 1 # scalar if ndim == 0: return Series(result) # same as self elif self.ndim == ndim: # return the construction dictionary for these axes if axes is None: return self._constructor(result) return self._constructor(result, **self._construct_axes_dict()) # sliced elif self.ndim == ndim + 1: if axes is None: return self._constructor_sliced(result) return self._constructor_sliced( result, **self._extract_axes_for_slice(self, axes)) raise ValueError('invalid _construct_return_type [self->{self}] ' '[result->{result}]'.format(self=self, result=result))
[ "\n Return the type for the ndim of the result.\n " ]
Please provide a description of the function:def count(self, axis='major'): i = self._get_axis_number(axis) values = self.values mask = np.isfinite(values) result = mask.sum(axis=i, dtype='int64') return self._wrap_result(result, axis)
[ "\n Return number of observations over requested axis.\n\n Parameters\n ----------\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n count : DataFrame\n " ]
Please provide a description of the function:def shift(self, periods=1, freq=None, axis='major'): if freq: return self.tshift(periods, freq, axis=axis) return super().slice_shift(periods, axis=axis)
[ "\n Shift index by desired number of periods with an optional time freq.\n\n The shifted data will not include the dropped periods and the\n shifted axis will be smaller than the original. This is different\n from the behavior of DataFrame.shift()\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n freq : DateOffset, timedelta, or time rule string, optional\n axis : {'items', 'major', 'minor'} or {0, 1, 2}\n\n Returns\n -------\n shifted : Panel\n " ]
Please provide a description of the function:def join(self, other, how='left', lsuffix='', rsuffix=''): from pandas.core.reshape.concat import concat if isinstance(other, Panel): join_major, join_minor = self._get_join_index(other, how) this = self.reindex(major=join_major, minor=join_minor) other = other.reindex(major=join_major, minor=join_minor) merged_data = this._data.merge(other._data, lsuffix, rsuffix) return self._constructor(merged_data) else: if lsuffix or rsuffix: raise ValueError('Suffixes not supported when passing ' 'multiple panels') if how == 'left': how = 'outer' join_axes = [self.major_axis, self.minor_axis] elif how == 'right': raise ValueError('Right join not supported with multiple ' 'panels') else: join_axes = None return concat([self] + list(other), axis=0, join=how, join_axes=join_axes, verify_integrity=True)
[ "\n Join items with other Panel either on major and minor axes column.\n\n Parameters\n ----------\n other : Panel or list of Panels\n Index should be similar to one of the columns in this one\n how : {'left', 'right', 'outer', 'inner'}\n How to handle indexes of the two objects. Default: 'left'\n for joining on index, None otherwise\n * left: use calling frame's index\n * right: use input frame's index\n * outer: form union of indexes\n * inner: use intersection of indexes\n lsuffix : string\n Suffix to use from left frame's overlapping columns\n rsuffix : string\n Suffix to use from right frame's overlapping columns\n\n Returns\n -------\n joined : Panel\n " ]
Please provide a description of the function:def update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore'): if not isinstance(other, self._constructor): other = self._constructor(other) axis_name = self._info_axis_name axis_values = self._info_axis other = other.reindex(**{axis_name: axis_values}) for frame in axis_values: self[frame].update(other[frame], join=join, overwrite=overwrite, filter_func=filter_func, errors=errors)
[ "\n Modify Panel in place using non-NA values from other Panel.\n\n May also use object coercible to Panel. Will align on items.\n\n Parameters\n ----------\n other : Panel, or object coercible to Panel\n The object from which the caller will be udpated.\n join : {'left', 'right', 'outer', 'inner'}, default 'left'\n How individual DataFrames are joined.\n overwrite : bool, default True\n If True then overwrite values for common keys in the calling Panel.\n filter_func : callable(1d-array) -> 1d-array<bool>, default None\n Can choose to replace values other than NA. Return True for values\n that should be updated.\n errors : {'raise', 'ignore'}, default 'ignore'\n If 'raise', will raise an error if a DataFrame and other both.\n\n .. versionchanged :: 0.24.0\n Changed from `raise_conflict=False|True`\n to `errors='ignore'|'raise'`.\n\n See Also\n --------\n DataFrame.update : Similar method for DataFrames.\n dict.update : Similar method for dictionaries.\n " ]
Please provide a description of the function:def _extract_axes(self, data, axes, **kwargs): return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)]
[ "\n Return a list of the axis indices.\n " ]
Please provide a description of the function:def _extract_axes_for_slice(self, axes): return {self._AXIS_SLICEMAP[i]: a for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)}
[ "\n Return the slice dictionary for these axes.\n " ]
Please provide a description of the function:def _homogenize_dict(self, frames, intersect=True, dtype=None): result = dict() # caller differs dict/ODict, preserved type if isinstance(frames, OrderedDict): result = OrderedDict() adj_frames = OrderedDict() for k, v in frames.items(): if isinstance(v, dict): adj_frames[k] = self._constructor_sliced(v) else: adj_frames[k] = v axes = self._AXIS_ORDERS[1:] axes_dict = {a: ax for a, ax in zip(axes, self._extract_axes( self, adj_frames, axes, intersect=intersect))} reindex_dict = {self._AXIS_SLICEMAP[a]: axes_dict[a] for a in axes} reindex_dict['copy'] = False for key, frame in adj_frames.items(): if frame is not None: result[key] = frame.reindex(**reindex_dict) else: result[key] = None axes_dict['data'] = result axes_dict['dtype'] = dtype return axes_dict
[ "\n Conform set of _constructor_sliced-like objects to either\n an intersection of indices / columns or a union.\n\n Parameters\n ----------\n frames : dict\n intersect : boolean, default True\n\n Returns\n -------\n dict of aligned results & indices\n " ]
Please provide a description of the function:def get_group_index(labels, shape, sort, xnull): def _int64_cut_off(shape): acc = 1 for i, mul in enumerate(shape): acc *= int(mul) if not acc < _INT64_MAX: return i return len(shape) def maybe_lift(lab, size): # promote nan values (assigned -1 label in lab array) # so that all output values are non-negative return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) labels = map(ensure_int64, labels) if not xnull: labels, shape = map(list, zip(*map(maybe_lift, labels, shape))) labels = list(labels) shape = list(shape) # Iteratively process all the labels in chunks sized so less # than _INT64_MAX unique int ids will be required for each chunk while True: # how many levels can be done without overflow: nlev = _int64_cut_off(shape) # compute flat ids for the first `nlev` levels stride = np.prod(shape[1:nlev], dtype='i8') out = stride * labels[0].astype('i8', subok=False, copy=False) for i in range(1, nlev): if shape[i] == 0: stride = 0 else: stride //= shape[i] out += labels[i] * stride if xnull: # exclude nulls mask = labels[0] == -1 for lab in labels[1:nlev]: mask |= lab == -1 out[mask] = -1 if nlev == len(shape): # all levels done! break # compress what has been done so far in order to avoid overflow # to retain lexical ranks, obs_ids should be sorted comp_ids, obs_ids = compress_group_index(out, sort=sort) labels = [comp_ids] + labels[nlev:] shape = [len(obs_ids)] + shape[nlev:] return out
[ "\n For the particular label_list, gets the offsets into the hypothetical list\n representing the totally ordered cartesian product of all possible label\n combinations, *as long as* this space fits within int64 bounds;\n otherwise, though group indices identify unique combinations of\n labels, they cannot be deconstructed.\n - If `sort`, rank of returned ids preserve lexical ranks of labels.\n i.e. returned id's can be used to do lexical sort on labels;\n - If `xnull` nulls (-1 labels) are passed through.\n\n Parameters\n ----------\n labels: sequence of arrays\n Integers identifying levels at each location\n shape: sequence of ints same length as labels\n Number of unique levels at each location\n sort: boolean\n If the ranks of returned ids should match lexical ranks of labels\n xnull: boolean\n If true nulls are excluded. i.e. -1 values in the labels are\n passed through\n Returns\n -------\n An array of type int64 where two elements are equal if their corresponding\n labels are equal at all location.\n " ]
Please provide a description of the function:def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull): if not xnull: lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8') shape = np.asarray(shape, dtype='i8') + lift if not is_int64_overflow_possible(shape): # obs ids are deconstructable! take the fast route! out = decons_group_index(obs_ids, shape) return out if xnull or not lift.any() \ else [x - y for x, y in zip(out, lift)] i = unique_label_indices(comp_ids) i8copy = lambda a: a.astype('i8', subok=False, copy=True) return [i8copy(lab[i]) for lab in labels]
[ "\n reconstruct labels from observed group ids\n\n Parameters\n ----------\n xnull: boolean,\n if nulls are excluded; i.e. -1 labels are passed through\n " ]
Please provide a description of the function:def nargsort(items, kind='quicksort', ascending=True, na_position='last'): # specially handle Categorical if is_categorical_dtype(items): if na_position not in {'first', 'last'}: raise ValueError('invalid na_position: {!r}'.format(na_position)) mask = isna(items) cnt_null = mask.sum() sorted_idx = items.argsort(ascending=ascending, kind=kind) if ascending and na_position == 'last': # NaN is coded as -1 and is listed in front after sorting sorted_idx = np.roll(sorted_idx, -cnt_null) elif not ascending and na_position == 'first': # NaN is coded as -1 and is listed in the end after sorting sorted_idx = np.roll(sorted_idx, cnt_null) return sorted_idx with warnings.catch_warnings(): # https://github.com/pandas-dev/pandas/issues/25439 # can be removed once ExtensionArrays are properly handled by nargsort warnings.filterwarnings( "ignore", category=FutureWarning, message="Converting timezone-aware DatetimeArray to") items = np.asanyarray(items) idx = np.arange(len(items)) mask = isna(items) non_nans = items[~mask] non_nan_idx = idx[~mask] nan_idx = np.nonzero(mask)[0] if not ascending: non_nans = non_nans[::-1] non_nan_idx = non_nan_idx[::-1] indexer = non_nan_idx[non_nans.argsort(kind=kind)] if not ascending: indexer = indexer[::-1] # Finally, place the NaNs at the end or the beginning according to # na_position if na_position == 'last': indexer = np.concatenate([indexer, nan_idx]) elif na_position == 'first': indexer = np.concatenate([nan_idx, indexer]) else: raise ValueError('invalid na_position: {!r}'.format(na_position)) return indexer
[ "\n This is intended to be a drop-in replacement for np.argsort which\n handles NaNs. It adds ascending and na_position parameters.\n GH #6399, #5231\n " ]
Please provide a description of the function:def get_indexer_dict(label_list, keys): shape = list(map(len, keys)) group_index = get_group_index(label_list, shape, sort=True, xnull=True) ngroups = ((group_index.size and group_index.max()) + 1) \ if is_int64_overflow_possible(shape) \ else np.prod(shape, dtype='i8') sorter = get_group_index_sorter(group_index, ngroups) sorted_labels = [lab.take(sorter) for lab in label_list] group_index = group_index.take(sorter) return lib.indices_fast(sorter, group_index, keys, sorted_labels)
[ " return a diction of {labels} -> {indexers} " ]
Please provide a description of the function:def get_group_index_sorter(group_index, ngroups): count = len(group_index) alpha = 0.0 # taking complexities literally; there may be beta = 1.0 # some room for fine-tuning these parameters do_groupsort = (count > 0 and ((alpha + beta * ngroups) < (count * np.log(count)))) if do_groupsort: sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups) return ensure_platform_int(sorter) else: return group_index.argsort(kind='mergesort')
[ "\n algos.groupsort_indexer implements `counting sort` and it is at least\n O(ngroups), where\n ngroups = prod(shape)\n shape = map(len, keys)\n that is, linear in the number of combinations (cartesian product) of unique\n values of groupby keys. This can be huge when doing multi-key groupby.\n np.argsort(kind='mergesort') is O(count x log(count)) where count is the\n length of the data-frame;\n Both algorithms are `stable` sort and that is necessary for correctness of\n groupby operations. e.g. consider:\n df.groupby(key)[col].transform('first')\n " ]
Please provide a description of the function:def compress_group_index(group_index, sort=True): size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT) table = hashtable.Int64HashTable(size_hint) group_index = ensure_int64(group_index) # note, group labels come out ascending (ie, 1,2,3 etc) comp_ids, obs_group_ids = table.get_labels_groupby(group_index) if sort and len(obs_group_ids) > 0: obs_group_ids, comp_ids = _reorder_by_uniques(obs_group_ids, comp_ids) return comp_ids, obs_group_ids
[ "\n Group_index is offsets into cartesian product of all possible labels. This\n space can be huge, so this function compresses it, by computing offsets\n (comp_ids) into the list of unique labels (obs_group_ids).\n " ]
Please provide a description of the function:def safe_sort(values, labels=None, na_sentinel=-1, assume_unique=False): if not is_list_like(values): raise TypeError("Only list-like objects are allowed to be passed to" "safe_sort as values") if not isinstance(values, np.ndarray): # don't convert to string types dtype, _ = infer_dtype_from_array(values) values = np.asarray(values, dtype=dtype) def sort_mixed(values): # order ints before strings, safe in py3 str_pos = np.array([isinstance(x, str) for x in values], dtype=bool) nums = np.sort(values[~str_pos]) strs = np.sort(values[str_pos]) return np.concatenate([nums, np.asarray(strs, dtype=object)]) sorter = None if lib.infer_dtype(values, skipna=False) == 'mixed-integer': # unorderable in py3 if mixed str/int ordered = sort_mixed(values) else: try: sorter = values.argsort() ordered = values.take(sorter) except TypeError: # try this anyway ordered = sort_mixed(values) # labels: if labels is None: return ordered if not is_list_like(labels): raise TypeError("Only list-like objects or None are allowed to be" "passed to safe_sort as labels") labels = ensure_platform_int(np.asarray(labels)) from pandas import Index if not assume_unique and not Index(values).is_unique: raise ValueError("values should be unique if labels is not None") if sorter is None: # mixed types (hash_klass, _), values = algorithms._get_data_algo( values, algorithms._hashtables) t = hash_klass(len(values)) t.map_locations(values) sorter = ensure_platform_int(t.lookup(ordered)) reverse_indexer = np.empty(len(sorter), dtype=np.int_) reverse_indexer.put(sorter, np.arange(len(sorter))) mask = (labels < -len(values)) | (labels >= len(values)) | \ (labels == na_sentinel) # (Out of bound indices will be masked with `na_sentinel` next, so we may # deal with them here without performance loss using `mode='wrap'`.) new_labels = reverse_indexer.take(labels, mode='wrap') np.putmask(new_labels, mask, na_sentinel) return ordered, ensure_platform_int(new_labels)
[ "\n Sort ``values`` and reorder corresponding ``labels``.\n ``values`` should be unique if ``labels`` is not None.\n Safe for use with mixed types (int, str), orders ints before strs.\n\n .. versionadded:: 0.19.0\n\n Parameters\n ----------\n values : list-like\n Sequence; must be unique if ``labels`` is not None.\n labels : list_like\n Indices to ``values``. All out of bound indices are treated as\n \"not found\" and will be masked with ``na_sentinel``.\n na_sentinel : int, default -1\n Value in ``labels`` to mark \"not found\".\n Ignored when ``labels`` is None.\n assume_unique : bool, default False\n When True, ``values`` are assumed to be unique, which can speed up\n the calculation. Ignored when ``labels`` is None.\n\n Returns\n -------\n ordered : ndarray\n Sorted ``values``\n new_labels : ndarray\n Reordered ``labels``; returned when ``labels`` is not None.\n\n Raises\n ------\n TypeError\n * If ``values`` is not list-like or if ``labels`` is neither None\n nor list-like\n * If ``values`` cannot be sorted\n ValueError\n * If ``labels`` is not None and ``values`` contain duplicates.\n " ]
Please provide a description of the function:def _check_ne_builtin_clash(expr): names = expr.names overlap = names & _ne_builtins if overlap: s = ', '.join(map(repr, overlap)) raise NumExprClobberingError('Variables in expression "{expr}" ' 'overlap with builtins: ({s})' .format(expr=expr, s=s))
[ "Attempt to prevent foot-shooting in a helpful way.\n\n Parameters\n ----------\n terms : Term\n Terms can contain\n " ]
Please provide a description of the function:def evaluate(self): if not self._is_aligned: self.result_type, self.aligned_axes = _align(self.expr.terms) # make sure no names in resolvers and locals/globals clash res = self._evaluate() return _reconstruct_object(self.result_type, res, self.aligned_axes, self.expr.terms.return_type)
[ "Run the engine on the expression\n\n This method performs alignment which is necessary no matter what engine\n is being used, thus its implementation is in the base class.\n\n Returns\n -------\n obj : object\n The result of the passed expression.\n " ]
Please provide a description of the function:def get_block_type(values, dtype=None):
    dtype = dtype or values.dtype
    vtype = dtype.type

    if is_sparse(dtype):
        # Need this first(ish) so that Sparse[datetime] is sparse
        cls = ExtensionBlock
    elif is_categorical(values):
        cls = CategoricalBlock
    elif issubclass(vtype, np.datetime64):
        assert not is_datetime64tz_dtype(values)
        cls = DatetimeBlock
    elif is_datetime64tz_dtype(values):
        cls = DatetimeTZBlock
    elif is_interval_dtype(dtype) or is_period_dtype(dtype):
        cls = ObjectValuesExtensionBlock
    elif is_extension_array_dtype(values):
        cls = ExtensionBlock
    elif issubclass(vtype, np.floating):
        cls = FloatBlock
    elif issubclass(vtype, np.timedelta64):
        # np.timedelta64 subclasses np.signedinteger in numpy's scalar
        # hierarchy, so this assert is a sanity check, not a typo
        assert issubclass(vtype, np.integer)
        cls = TimeDeltaBlock
    elif issubclass(vtype, np.complexfloating):
        cls = ComplexBlock
    elif issubclass(vtype, np.integer):
        cls = IntBlock
    elif dtype == np.bool_:
        cls = BoolBlock
    else:
        cls = ObjectBlock
    return cls
[ "\n Find the appropriate Block subclass to use for the given values and dtype.\n\n Parameters\n ----------\n values : ndarray-like\n dtype : numpy or pandas dtype\n\n Returns\n -------\n cls : class, subclass of Block\n " ]
Please provide a description of the function:def _extend_blocks(result, blocks=None): from pandas.core.internals import BlockManager if blocks is None: blocks = [] if isinstance(result, list): for r in result: if isinstance(r, list): blocks.extend(r) else: blocks.append(r) elif isinstance(result, BlockManager): blocks.extend(result.blocks) else: blocks.append(result) return blocks
[ " return a new extended blocks, givin the result " ]
Please provide a description of the function:def _block_shape(values, ndim=1, shape=None): if values.ndim < ndim: if shape is None: shape = values.shape if not is_extension_array_dtype(values): # TODO: https://github.com/pandas-dev/pandas/issues/23023 # block.shape is incorrect for "2D" ExtensionArrays # We can't, and don't need to, reshape. values = values.reshape(tuple((1, ) + shape)) return values
[ " guarantee the shape of the values to be at least 1 d " ]
Please provide a description of the function:def _safe_reshape(arr, new_shape): if isinstance(arr, ABCSeries): arr = arr._values if not isinstance(arr, ABCExtensionArray): arr = arr.reshape(new_shape) return arr
[ "\n If possible, reshape `arr` to have shape `new_shape`,\n with a couple of exceptions (see gh-13012):\n\n 1) If `arr` is a ExtensionArray or Index, `arr` will be\n returned as is.\n 2) If `arr` is a Series, the `_values` attribute will\n be reshaped and returned.\n\n Parameters\n ----------\n arr : array-like, object to be reshaped\n new_shape : int or tuple of ints, the new shape\n " ]
Please provide a description of the function:def _putmask_smart(v, m, n): # we cannot use np.asarray() here as we cannot have conversions # that numpy does when numeric are mixed with strings # n should be the length of the mask or a scalar here if not is_list_like(n): n = np.repeat(n, len(m)) elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar n = np.repeat(np.array(n, ndmin=1), len(m)) # see if we are only masking values that if putted # will work in the current dtype try: nn = n[m] # make sure that we have a nullable type # if we have nulls if not _isna_compat(v, nn[0]): raise ValueError # we ignore ComplexWarning here with warnings.catch_warnings(record=True): warnings.simplefilter("ignore", np.ComplexWarning) nn_at = nn.astype(v.dtype) # avoid invalid dtype comparisons # between numbers & strings # only compare integers/floats # don't compare integers to datetimelikes if (not is_numeric_v_string_like(nn, nn_at) and (is_float_dtype(nn.dtype) or is_integer_dtype(nn.dtype) and is_float_dtype(nn_at.dtype) or is_integer_dtype(nn_at.dtype))): comp = (nn == nn_at) if is_list_like(comp) and comp.all(): nv = v.copy() nv[m] = nn_at return nv except (ValueError, IndexError, TypeError, OverflowError): pass n = np.asarray(n) def _putmask_preserve(nv, n): try: nv[m] = n[m] except (IndexError, ValueError): nv[m] = n return nv # preserves dtype if possible if v.dtype.kind == n.dtype.kind: return _putmask_preserve(v, n) # change the dtype if needed dtype, _ = maybe_promote(n.dtype) if is_extension_type(v.dtype) and is_object_dtype(dtype): v = v.get_values(dtype) else: v = v.astype(dtype) return _putmask_preserve(v, n)
[ "\n Return a new ndarray, try to preserve dtype if possible.\n\n Parameters\n ----------\n v : `values`, updated in-place (array like)\n m : `mask`, applies to both sides (array like)\n n : `new values` either scalar or an array like aligned with `values`\n\n Returns\n -------\n values : ndarray with updated values\n this *may* be a copy of the original\n\n See Also\n --------\n ndarray.putmask\n " ]
Please provide a description of the function:def _check_ndim(self, values, ndim): if ndim is None: ndim = values.ndim if self._validate_ndim and values.ndim != ndim: msg = ("Wrong number of dimensions. values.ndim != ndim " "[{} != {}]") raise ValueError(msg.format(values.ndim, ndim)) return ndim
[ "\n ndim inference and validation.\n\n Infers ndim from 'values' if not provided to __init__.\n Validates that values.ndim and ndim are consistent if and only if\n the class variable '_validate_ndim' is True.\n\n Parameters\n ----------\n values : array-like\n ndim : int or None\n\n Returns\n -------\n ndim : int\n\n Raises\n ------\n ValueError : the number of dimensions do not match\n " ]
Please provide a description of the function:def is_categorical_astype(self, dtype): if dtype is Categorical or dtype is CategoricalDtype: # this is a pd.Categorical, but is not # a valid type for astypeing raise TypeError("invalid type {0} for astype".format(dtype)) elif is_categorical_dtype(dtype): return True return False
[ "\n validate that we have a astypeable to categorical,\n returns a boolean if we are a categorical\n " ]
Please provide a description of the function:def get_values(self, dtype=None): if is_object_dtype(dtype): return self.values.astype(object) return self.values
[ "\n return an internal format, currently just the ndarray\n this is often overridden to handle to_dense like operations\n " ]
Please provide a description of the function:def make_block(self, values, placement=None, ndim=None): if placement is None: placement = self.mgr_locs if ndim is None: ndim = self.ndim return make_block(values, placement=placement, ndim=ndim)
[ "\n Create a new block, with type inference propagate any values that are\n not specified\n " ]
Please provide a description of the function:def make_block_same_class(self, values, placement=None, ndim=None, dtype=None): if dtype is not None: # issue 19431 fastparquet is passing this warnings.warn("dtype argument is deprecated, will be removed " "in a future release.", DeprecationWarning) if placement is None: placement = self.mgr_locs return make_block(values, placement=placement, ndim=ndim, klass=self.__class__, dtype=dtype)
[ " Wrap given values in a block of same type as self. " ]
Please provide a description of the function:def getitem_block(self, slicer, new_mgr_locs=None): if new_mgr_locs is None: if isinstance(slicer, tuple): axis0_slicer = slicer[0] else: axis0_slicer = slicer new_mgr_locs = self.mgr_locs[axis0_slicer] new_values = self._slice(slicer) if self._validate_ndim and new_values.ndim != self.ndim: raise ValueError("Only same dim slicing is allowed") return self.make_block_same_class(new_values, new_mgr_locs)
[ "\n Perform __getitem__-like, return result as block.\n\n As of now, only supports slices that preserve dimensionality.\n " ]
Please provide a description of the function:def concat_same_type(self, to_concat, placement=None): values = self._concatenator([blk.values for blk in to_concat], axis=self.ndim - 1) return self.make_block_same_class( values, placement=placement or slice(0, len(values), 1))
[ "\n Concatenate list of single blocks of the same type.\n " ]
Please provide a description of the function:def delete(self, loc): self.values = np.delete(self.values, loc, 0) self.mgr_locs = self.mgr_locs.delete(loc)
[ "\n Delete given loc(-s) from block in-place.\n " ]