<SYSTEM_TASK:> Get the location of the first missing value. <END_TASK> <USER_TASK:> Description: def _first_fill_value_loc(self): """ Get the location of the first missing value. Returns ------- int """
if len(self) == 0 or self.sp_index.npoints == len(self):
    return -1

indices = self.sp_index.to_int_index().indices
if not len(indices) or indices[0] > 0:
    return 0

diff = indices[1:] - indices[:-1]
return np.searchsorted(diff, 2) + 1
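A minimal, hedged illustration of the private helper above, assuming the 0.24-era SparseArray API; the explicit fill_value and the printed result are assumptions inferred from the code, not library documentation:

import pandas as pd

arr = pd.SparseArray([0, 0, 1, 2], fill_value=0)
# sp_index stores positions [2, 3]; position 0 therefore holds the
# fill value, which is what _first_fill_value_loc() should report.
print(arr._first_fill_value_loc())  # expected: 0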
<SYSTEM_TASK:> Returns a Series containing counts of unique values. <END_TASK> <USER_TASK:> Description: def value_counts(self, dropna=True): """ Returns a Series containing counts of unique values. Parameters ---------- dropna : boolean, default True Don't include counts of NaN, even if NaN is in sp_values. Returns ------- counts : Series """
from pandas import Index, Series

keys, counts = algos._value_counts_arraylike(self.sp_values,
                                             dropna=dropna)
fcounts = self.sp_index.ngaps
if fcounts > 0:
    if self._null_fill_value and dropna:
        pass
    else:
        if self._null_fill_value:
            mask = isna(keys)
        else:
            mask = keys == self.fill_value

        if mask.any():
            counts[mask] += fcounts
        else:
            keys = np.insert(keys, 0, self.fill_value)
            counts = np.insert(counts, 0, fcounts)

if not isinstance(keys, ABCIndexClass):
    keys = Index(keys)
result = Series(counts, index=keys)
return result
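A hedged usage sketch of the method above; the array contents are made up and the ordering of the resulting Series may differ:

import pandas as pd

sparse = pd.SparseArray([0, 0, 1, 1, 2], fill_value=0)
# The two unstored gaps are counted under the fill value (0), since the
# fill value here is not NaN.
print(sparse.value_counts())
# expected counts, roughly: 0 -> 2, 1 -> 2, 2 -> 1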
<SYSTEM_TASK:> Change the dtype of a SparseArray. <END_TASK> <USER_TASK:> Description: def astype(self, dtype=None, copy=True): """ Change the dtype of a SparseArray. The output will always be a SparseArray. To convert to a dense ndarray with a certain dtype, use :meth:`numpy.asarray`. Parameters ---------- dtype : np.dtype or ExtensionDtype For SparseDtype, this changes the dtype of ``self.sp_values`` and the ``self.fill_value``. For other dtypes, this only changes the dtype of ``self.sp_values``. copy : bool, default True Whether to ensure a copy is made, even if not necessary. Returns ------- SparseArray Examples -------- >>> arr = SparseArray([0, 0, 1, 2]) >>> arr [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) >>> arr.astype(np.dtype('int32')) [0, 0, 1, 2] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Using a NumPy dtype with a different kind (e.g. float) will coerce just ``self.sp_values``. >>> arr.astype(np.dtype('float64')) ... # doctest: +NORMALIZE_WHITESPACE [0, 0, 1.0, 2.0] Fill: 0 IntIndex Indices: array([2, 3], dtype=int32) Use a SparseDtype if you wish to change the fill value as well. >>> arr.astype(SparseDtype("float64", fill_value=np.nan)) ... # doctest: +NORMALIZE_WHITESPACE [nan, nan, 1.0, 2.0] Fill: nan IntIndex Indices: array([2, 3], dtype=int32) """
dtype = self.dtype.update_dtype(dtype)
subtype = dtype._subtype_with_str
sp_values = astype_nansafe(self.sp_values, subtype, copy=copy)
if sp_values is self.sp_values and copy:
    sp_values = sp_values.copy()

return self._simple_new(sp_values, self.sp_index, dtype)
<SYSTEM_TASK:> Tests whether all elements evaluate True <END_TASK> <USER_TASK:> Description: def all(self, axis=None, *args, **kwargs): """ Tests whether all elements evaluate True Returns ------- all : bool See Also -------- numpy.all """
nv.validate_all(args, kwargs)

values = self.sp_values

if len(values) != len(self) and not np.all(self.fill_value):
    return False

return values.all()
<SYSTEM_TASK:> Tests whether at least one element evaluates True <END_TASK> <USER_TASK:> Description: def any(self, axis=0, *args, **kwargs): """ Tests whether at least one element evaluates True Returns ------- any : bool See Also -------- numpy.any """
nv.validate_any(args, kwargs)

values = self.sp_values

if len(values) != len(self) and np.any(self.fill_value):
    return True

return values.any().item()
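A short, hedged sketch of how the fill value participates in any()/all() when some positions are not stored in sp_values; the example data are assumptions:

import pandas as pd

arr = pd.SparseArray([False, False, True], fill_value=False)
print(arr.any())  # True  -- one stored value is truthy
print(arr.all())  # False -- the unstored gaps carry the falsy fill_value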
<SYSTEM_TASK:> Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise <END_TASK> <USER_TASK:> Description: def _replace_booleans(tok): """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise precedence is changed to boolean precedence. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values """
toknum, tokval = tok
if toknum == tokenize.OP:
    if tokval == '&':
        return tokenize.NAME, 'and'
    elif tokval == '|':
        return tokenize.NAME, 'or'
    return toknum, tokval
return toknum, tokval
<SYSTEM_TASK:> Replace local variables with a syntactically valid name. <END_TASK> <USER_TASK:> Description: def _replace_locals(tok): """Replace local variables with a syntactically valid name. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values Notes ----- This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as ``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_`` is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it. """
toknum, tokval = tok
if toknum == tokenize.OP and tokval == '@':
    return tokenize.OP, _LOCAL_TAG
return toknum, tokval
<SYSTEM_TASK:> Clean up a column name if surrounded by backticks. <END_TASK> <USER_TASK:> Description: def _clean_spaces_backtick_quoted_names(tok): """Clean up a column name if surrounded by backticks. Backtick quoted strings are indicated by a certain tokval value. If a string is a backtick quoted token it will be processed by :func:`_remove_spaces_column_name` so that the parser can find this string when the query is executed. See also :meth:`NDFrame._get_space_character_free_column_resolver`. Parameters ---------- tok : tuple of int, str ints correspond to the all caps constants in the tokenize module Returns ------- t : tuple of int, str Either the input token or the replacement values """
toknum, tokval = tok
if toknum == _BACKTICK_QUOTED_STRING:
    return tokenize.NAME, _remove_spaces_column_name(tokval)
return toknum, tokval
<SYSTEM_TASK:> Compose a collection of tokenization functions <END_TASK> <USER_TASK:> Description: def _preparse(source, f=_compose(_replace_locals, _replace_booleans, _rewrite_assign, _clean_spaces_backtick_quoted_names)): """Compose a collection of tokenization functions Parameters ---------- source : str A Python source code string f : callable This takes a tuple of (toknum, tokval) as its argument and returns a tuple with the same structure but possibly different elements. Defaults to the composition of ``_rewrite_assign``, ``_replace_booleans``, and ``_replace_locals``. Returns ------- s : str Valid Python source code Notes ----- The `f` parameter can be any callable that takes *and* returns input of the form ``(toknum, tokval)``, where ``toknum`` is one of the constants from the ``tokenize`` module and ``tokval`` is a string. """
assert callable(f), 'f must be callable'
return tokenize.untokenize(lmap(f, tokenize_string(source)))
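A hedged sketch of the composed rewriters in action. The import path and the exact whitespace produced by tokenize.untokenize are assumptions and may differ between pandas versions:

from pandas.core.computation.expr import _preparse

src = "a & b | @x"
print(_preparse(src))
# expected rewrites: '&' -> 'and', '|' -> 'or', '@x' -> '__pd_eval_local_x'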
<SYSTEM_TASK:> Filter out AST nodes that are subclasses of ``superclass``. <END_TASK> <USER_TASK:> Description: def _filter_nodes(superclass, all_nodes=_all_nodes): """Filter out AST nodes that are subclasses of ``superclass``."""
node_names = (node.__name__
              for node in all_nodes
              if issubclass(node, superclass))
return frozenset(node_names)
<SYSTEM_TASK:> Return a function that raises a NotImplementedError with a passed node <END_TASK> <USER_TASK:> Description: def _node_not_implemented(node_name, cls): """Return a function that raises a NotImplementedError with a passed node name. """
def f(self, *args, **kwargs):
    raise NotImplementedError("{name!r} nodes are not "
                              "implemented".format(name=node_name))

return f
<SYSTEM_TASK:> Decorator to disallow certain nodes from parsing. Raises a <END_TASK> <USER_TASK:> Description: def disallow(nodes): """Decorator to disallow certain nodes from parsing. Raises a NotImplementedError instead. Returns ------- disallowed : callable """
def disallowed(cls):
    cls.unsupported_nodes = ()
    for node in nodes:
        new_method = _node_not_implemented(node, cls)
        name = 'visit_{node}'.format(node=node)
        cls.unsupported_nodes += (name,)
        setattr(cls, name, new_method)
    return cls
return disallowed
<SYSTEM_TASK:> Return a function to create an op class with its symbol already passed. <END_TASK> <USER_TASK:> Description: def _op_maker(op_class, op_symbol): """Return a function to create an op class with its symbol already passed. Returns ------- f : callable """
def f(self, node, *args, **kwargs):
    """Return a partial function with an Op subclass with an operator
    already passed.

    Returns
    -------
    f : callable
    """
    return partial(op_class, op_symbol, *args, **kwargs)
return f
<SYSTEM_TASK:> Decorator to add default implementation of ops. <END_TASK> <USER_TASK:> Description: def add_ops(op_classes): """Decorator to add default implementation of ops."""
def f(cls):
    for op_attr_name, op_class in op_classes.items():
        ops = getattr(cls, '{name}_ops'.format(name=op_attr_name))
        ops_map = getattr(cls, '{name}_op_nodes_map'.format(
            name=op_attr_name))
        for op in ops:
            op_node = ops_map[op]
            if op_node is not None:
                made_op = _op_maker(op_class, op)
                setattr(cls, 'visit_{node}'.format(node=op_node), made_op)
    return cls
return f
<SYSTEM_TASK:> Get the names in an expression <END_TASK> <USER_TASK:> Description: def names(self): """Get the names in an expression"""
if is_term(self.terms):
    return frozenset([self.terms.name])
return frozenset(term.name for term in com.flatten(self.terms))
<SYSTEM_TASK:> return a boolean whether I can attempt conversion to a TimedeltaIndex <END_TASK> <USER_TASK:> Description: def _is_convertible_to_index(other): """ return a boolean whether I can attempt conversion to a TimedeltaIndex """
if isinstance(other, TimedeltaIndex):
    return True
elif (len(other) > 0 and
      other.inferred_type not in ('floating', 'mixed-integer', 'integer',
                                  'mixed-integer-float', 'mixed')):
    return True
return False
<SYSTEM_TASK:> Return a fixed frequency TimedeltaIndex, with day as the default <END_TASK> <USER_TASK:> Description: def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): """ Return a fixed frequency TimedeltaIndex, with day as the default frequency Parameters ---------- start : string or timedelta-like, default None Left bound for generating timedeltas end : string or timedelta-like, default None Right bound for generating timedeltas periods : integer, default None Number of periods to generate freq : string or DateOffset, default 'D' Frequency strings can have multiples, e.g. '5H' name : string, default None Name of the resulting TimedeltaIndex closed : string, default None Make the interval closed with respect to the given frequency to the 'left', 'right', or both sides (None) Returns ------- rng : TimedeltaIndex Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end`` (closed on both sides). To learn more about the frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- >>> pd.timedelta_range(start='1 day', periods=4) TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``closed`` parameter specifies which endpoint is included. The default behavior is to include both endpoints. >>> pd.timedelta_range(start='1 day', periods=4, closed='right') TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq='D') The ``freq`` parameter specifies the frequency of the TimedeltaIndex. Only fixed frequencies can be passed, non-fixed frequencies such as 'M' (month end) will raise. >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H') TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00', '1 days 18:00:00', '2 days 00:00:00'], dtype='timedelta64[ns]', freq='6H') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.timedelta_range(start='1 day', end='5 days', periods=4) TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00', '5 days 00:00:00'], dtype='timedelta64[ns]', freq=None) """
if freq is None and com._any_none(periods, start, end):
    freq = 'D'

freq, freq_infer = dtl.maybe_infer_freq(freq)
tdarr = TimedeltaArray._generate_range(start, end, periods, freq,
                                       closed=closed)
return TimedeltaIndex._simple_new(tdarr._data, freq=tdarr.freq, name=name)
<SYSTEM_TASK:> Returns a FrozenList with other concatenated to the end of self. <END_TASK> <USER_TASK:> Description: def union(self, other): """ Returns a FrozenList with other concatenated to the end of self. Parameters ---------- other : array-like The array-like whose elements we are concatenating. Returns ------- union : FrozenList The collection with other concatenated to the end of self. """
if isinstance(other, tuple):
    other = list(other)
return type(self)(super().__add__(other))
<SYSTEM_TASK:> Returns a FrozenList with elements from other removed from self. <END_TASK> <USER_TASK:> Description: def difference(self, other): """ Returns a FrozenList with elements from other removed from self. Parameters ---------- other : array-like The array-like whose elements we are removing from self. Returns ------- diff : FrozenList The collection difference between self and other. """
other = set(other)
temp = [x for x in self if x not in other]
return type(self)(temp)
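A hedged usage sketch for the two FrozenList set-like methods above; the import path is an assumption for this pandas version:

from pandas.core.indexes.frozen import FrozenList

fl = FrozenList(['a', 'b'])
print(fl.union(['c']))       # roughly: ['a', 'b', 'c']
print(fl.difference(['b']))  # roughly: ['a']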
<SYSTEM_TASK:> Find indices to insert `value` so as to maintain order. <END_TASK> <USER_TASK:> Description: def searchsorted(self, value, side="left", sorter=None): """ Find indices to insert `value` so as to maintain order. For full documentation, see `numpy.searchsorted` See Also -------- numpy.searchsorted : Equivalent function. """
# We are much more performant if the searched
# indexer is the same type as the array.
#
# This doesn't matter for int64, but DOES
# matter for smaller int dtypes.
#
# xref: https://github.com/numpy/numpy/issues/5370
try:
    value = self.dtype.type(value)
except ValueError:
    pass

return super().searchsorted(value, side=side, sorter=sorter)
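The dtype-coercion trick above can be shown with plain NumPy; this is a standalone sketch, not the pandas code path itself:

import numpy as np

arr = np.array([1, 3, 5, 7], dtype=np.int8)
value = arr.dtype.type(5)           # coerce the scalar to the array's int8
print(np.searchsorted(arr, value))  # 2, without upcasting the array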
<SYSTEM_TASK:> Segregate Series based on type and coerce into matrices. <END_TASK> <USER_TASK:> Description: def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """
# figure out the index, if necessary
if index is None:
    index = extract_index(arrays)
else:
    index = ensure_index(index)

# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)

# from BlockManager perspective
axes = [ensure_index(columns), index]

return create_block_manager_from_arrays(arrays, arr_names, axes)
<SYSTEM_TASK:> Extract from a masked rec array and create the manager. <END_TASK> <USER_TASK:> Description: def masked_rec_array_to_mgr(data, index, columns, dtype, copy): """ Extract from a masked rec array and create the manager. """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
    index = get_names_from_index(fdata)
    if index is None:
        index = ibase.default_index(len(data))
index = ensure_index(index)

if columns is not None:
    columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)

# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
    mask = ma.getmaskarray(data[col])
    if mask.any():
        arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
        arr[mask] = fv
    new_arrays.append(arr)

# create the manager
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
    columns = arr_columns

mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)

if copy:
    mgr = mgr.copy()
return mgr
<SYSTEM_TASK:> Segregate Series based on type and coerce into matrices. <END_TASK> <USER_TASK:> Description: def init_dict(data, index, columns, dtype=None): """ Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. """
if columns is not None:
    from pandas.core.series import Series
    arrays = Series(data, index=columns, dtype=object)
    data_names = arrays.index

    missing = arrays.isnull()
    if index is None:
        # GH10856
        # raise ValueError if only scalars in dict
        index = extract_index(arrays[~missing])
    else:
        index = ensure_index(index)

    # no obvious "empty" int column
    if missing.any() and not is_integer_dtype(dtype):
        if dtype is None or np.issubdtype(dtype, np.flexible):
            # GH#1783
            nan_dtype = object
        else:
            nan_dtype = dtype
        val = construct_1d_arraylike_from_scalar(np.nan, len(index),
                                                 nan_dtype)
        arrays.loc[missing] = [val] * missing.sum()

else:
    keys = com.dict_keys_to_ordered_list(data)
    columns = data_names = Index(keys)
    # GH#24096 need copy to be deep for datetime64tz case
    # TODO: See if we can avoid these copies
    arrays = [data[k] if not is_datetime64tz_dtype(data[k]) else
              data[k].copy(deep=True) for k in keys]
return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
<SYSTEM_TASK:> Return list of arrays, columns. <END_TASK> <USER_TASK:> Description: def to_arrays(data, columns, coerce_float=False, dtype=None): """ Return list of arrays, columns. """
if isinstance(data, ABCDataFrame):
    if columns is not None:
        arrays = [data._ixs(i, axis=1).values
                  for i, col in enumerate(data.columns) if col in columns]
    else:
        columns = data.columns
        arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]

    return arrays, columns

if not len(data):
    if isinstance(data, np.ndarray):
        columns = data.dtype.names
        if columns is not None:
            return [[]] * len(columns), columns
    return [], []  # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
    return _list_to_arrays(data, columns, coerce_float=coerce_float,
                           dtype=dtype)
elif isinstance(data[0], abc.Mapping):
    return _list_of_dict_to_arrays(data, columns,
                                   coerce_float=coerce_float, dtype=dtype)
elif isinstance(data[0], ABCSeries):
    return _list_of_series_to_arrays(data, columns,
                                     coerce_float=coerce_float,
                                     dtype=dtype)
elif isinstance(data[0], Categorical):
    if columns is None:
        columns = ibase.default_index(len(data))
    return data, columns
elif (isinstance(data, (np.ndarray, ABCSeries, Index)) and
      data.dtype.names is not None):
    columns = list(data.dtype.names)
    arrays = [data[k] for k in columns]
    return arrays, columns
else:
    # last ditch effort
    data = lmap(tuple, data)
    return _list_to_arrays(data, columns, coerce_float=coerce_float,
                           dtype=dtype)
<SYSTEM_TASK:> Sanitize an index type to return an ndarray of the underlying, pass <END_TASK> <USER_TASK:> Description: def sanitize_index(data, index, copy=False): """ Sanitize an index type to return an ndarray of the underlying, pass through a non-Index. """
if index is None:
    return data

if len(data) != len(index):
    raise ValueError('Length of values does not match length of index')

if isinstance(data, ABCIndexClass) and not copy:
    pass
elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)):
    data = data._values
    if copy:
        data = data.copy()

elif isinstance(data, np.ndarray):

    # coerce datetimelike types
    if data.dtype.kind in ['M', 'm']:
        data = sanitize_array(data, index, copy=copy)

return data
<SYSTEM_TASK:> Make sure a valid engine is passed. <END_TASK> <USER_TASK:> Description: def _check_engine(engine): """Make sure a valid engine is passed. Parameters ---------- engine : str Raises ------ KeyError * If an invalid engine is passed ImportError * If numexpr was requested but doesn't exist Returns ------- string engine """
from pandas.core.computation.check import _NUMEXPR_INSTALLED

if engine is None:
    if _NUMEXPR_INSTALLED:
        engine = 'numexpr'
    else:
        engine = 'python'

if engine not in _engines:
    valid = list(_engines.keys())
    raise KeyError('Invalid engine {engine!r} passed, valid engines are'
                   ' {valid}'.format(engine=engine, valid=valid))

# TODO: validate this in a more general way (thinking of future engines
# that won't necessarily be import-able)
# Could potentially be done on engine instantiation
if engine == 'numexpr':
    if not _NUMEXPR_INSTALLED:
        raise ImportError("'numexpr' is not installed or an "
                          "unsupported version. Cannot use "
                          "engine='numexpr' for query/eval "
                          "if 'numexpr' is not installed")

return engine
<SYSTEM_TASK:> Make sure a valid parser is passed. <END_TASK> <USER_TASK:> Description: def _check_parser(parser): """Make sure a valid parser is passed. Parameters ---------- parser : str Raises ------ KeyError * If an invalid parser is passed """
from pandas.core.computation.expr import _parsers
if parser not in _parsers:
    raise KeyError('Invalid parser {parser!r} passed, valid parsers are'
                   ' {valid}'.format(parser=parser, valid=_parsers.keys()))
<SYSTEM_TASK:> Evaluate a Python expression as a string using various backends. <END_TASK> <USER_TASK:> Description: def eval(expr, parser='pandas', engine=None, truediv=True, local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inplace=False): """Evaluate a Python expression as a string using various backends. The following arithmetic operations are supported: ``+``, ``-``, ``*``, ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, :keyword:`or`, and :keyword:`not` with the same semantics as the corresponding bitwise operators. :class:`~pandas.Series` and :class:`~pandas.DataFrame` objects are supported and behave as they would with plain ol' Python evaluation. Parameters ---------- expr : str or unicode The expression to evaluate. This string cannot contain any Python `statements <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__, only Python `expressions <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__. parser : string, default 'pandas', {'pandas', 'python'} The parser to use to construct the syntax tree from the expression. The default of ``'pandas'`` parses code slightly different than standard Python. Alternatively, you can parse an expression using the ``'python'`` parser to retain strict Python semantics. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. engine : string or None, default 'numexpr', {'python', 'numexpr'} The engine used to evaluate the expression. Supported engines are - None : tries to use ``numexpr``, falls back to ``python`` - ``'numexpr'``: This default engine evaluates pandas objects using numexpr for large speed ups in complex expressions with large frames. - ``'python'``: Performs operations as if you had ``eval``'d in top level python. This engine is generally not that useful. More backends may be available in the future. truediv : bool, optional Whether to use true division, like in Python >= 3 local_dict : dict or None, optional A dictionary of local variables, taken from locals() by default. global_dict : dict or None, optional A dictionary of global variables, taken from globals() by default. resolvers : list of dict-like or None, optional A list of objects implementing the ``__getitem__`` special method that you can use to inject an additional collection of namespaces to use for variable lookup. For example, this is used in the :meth:`~DataFrame.query` method to inject the ``DataFrame.index`` and ``DataFrame.columns`` variables that refer to their respective :class:`~pandas.DataFrame` instance attributes. level : int, optional The number of prior stack frames to traverse and add to the current scope. Most users will **not** need to change this parameter. target : object, optional, default None This is the target object for assignment. It is used when there is variable assignment in the expression. If so, then `target` must support item assignment with string keys, and if a copy is being returned, it must also support `.copy()`. inplace : bool, default False If `target` is provided, and the expression mutates `target`, whether to modify `target` inplace. Otherwise, return a copy of `target` with the mutation. 
Returns ------- ndarray, numeric scalar, DataFrame, Series Raises ------ ValueError There are many instances where such an error can be raised: - `target=None`, but the expression is multiline. - The expression is multiline, but not all them have item assignment. An example of such an arrangement is this: a = b + 1 a + 2 Here, there are expressions on different lines, making it multiline, but the last line has no variable assigned to the output of `a + 2`. - `inplace=True`, but the expression is missing item assignment. - Item assignment is provided, but the `target` does not support string item assignment. - Item assignment is provided and `inplace=False`, but the `target` does not support the `.copy()` method See Also -------- DataFrame.query DataFrame.eval Notes ----- The ``dtype`` of any objects involved in an arithmetic ``%`` operation are recursively cast to ``float64``. See the :ref:`enhancing performance <enhancingperf.eval>` documentation for more details. """
from pandas.core.computation.expr import Expr

inplace = validate_bool_kwarg(inplace, "inplace")

if isinstance(expr, str):
    _check_expression(expr)
    exprs = [e.strip() for e in expr.splitlines() if e.strip() != '']
else:
    exprs = [expr]
multi_line = len(exprs) > 1

if multi_line and target is None:
    raise ValueError("multi-line expressions are only valid in the "
                     "context of data, use DataFrame.eval")

ret = None
first_expr = True
target_modified = False

for expr in exprs:
    expr = _convert_expression(expr)
    engine = _check_engine(engine)
    _check_parser(parser)
    _check_resolvers(resolvers)
    _check_for_locals(expr, level, parser)

    # get our (possibly passed-in) scope
    env = _ensure_scope(level + 1, global_dict=global_dict,
                        local_dict=local_dict, resolvers=resolvers,
                        target=target)

    parsed_expr = Expr(expr, engine=engine, parser=parser, env=env,
                       truediv=truediv)

    # construct the engine and evaluate the parsed expression
    eng = _engines[engine]
    eng_inst = eng(parsed_expr)
    ret = eng_inst.evaluate()

    if parsed_expr.assigner is None:
        if multi_line:
            raise ValueError("Multi-line expressions are only valid"
                             " if all expressions contain an assignment")
        elif inplace:
            raise ValueError("Cannot operate inplace "
                             "if there is no assignment")

    # assign if needed
    assigner = parsed_expr.assigner
    if env.target is not None and assigner is not None:
        target_modified = True

        # if returning a copy, copy only on the first assignment
        if not inplace and first_expr:
            try:
                target = env.target.copy()
            except AttributeError:
                raise ValueError("Cannot return a copy of the target")
        else:
            target = env.target

        # TypeError is most commonly raised (e.g. int, list), but you
        # get IndexError if you try to do this assignment on np.ndarray.
        # we will ignore numpy warnings here; e.g. if trying
        # to use a non-numeric indexer
        try:
            with warnings.catch_warnings(record=True):
                # TODO: Filter the warnings we actually care about here.
                target[assigner] = ret
        except (TypeError, IndexError):
            raise ValueError("Cannot assign expression output to target")

        if not resolvers:
            resolvers = ({assigner: ret},)
        else:
            # existing resolver needs updated to handle
            # case of mutating existing column in copy
            for resolver in resolvers:
                if assigner in resolver:
                    resolver[assigner] = ret
                    break
            else:
                resolvers += ({assigner: ret},)

        ret = None
        first_expr = False

# We want to exclude `inplace=None` as being False.
if inplace is False:
    return target if target_modified else ret
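A hedged usage sketch of pd.eval; the frame and column names are made up, and the assignment form assumes the target/inplace semantics described in the docstring:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})

# plain expression, evaluated with the default engine/parser
print(pd.eval("df.a + df.b"))

# assignment with a target; inplace=False should return a modified copy
out = pd.eval("c = df.a + df.b", target=df, inplace=False)
print(out)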
<SYSTEM_TASK:> Convert arrays to MultiIndex. <END_TASK> <USER_TASK:> Description: def from_arrays(cls, arrays, sortorder=None, names=None): """ Convert arrays to MultiIndex. Parameters ---------- arrays : list / sequence of array-likes Each array-like gives one level's value for each data point. len(arrays) is the number of levels. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']] >>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """
error_msg = "Input must be a list / sequence of array-likes."
if not is_list_like(arrays):
    raise TypeError(error_msg)
elif is_iterator(arrays):
    arrays = list(arrays)

# Check if elements of array are list-like
for array in arrays:
    if not is_list_like(array):
        raise TypeError(error_msg)

# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
    if len(arrays[i]) != len(arrays[i - 1]):
        raise ValueError('all arrays must be same length')

from pandas.core.arrays.categorical import _factorize_from_iterables

codes, levels = _factorize_from_iterables(arrays)
if names is None:
    names = [getattr(arr, "name", None) for arr in arrays]

return MultiIndex(levels=levels, codes=codes, sortorder=sortorder,
                  names=names, verify_integrity=False)
<SYSTEM_TASK:> Convert list of tuples to MultiIndex. <END_TASK> <USER_TASK:> Description: def from_tuples(cls, tuples, sortorder=None, names=None): """ Convert list of tuples to MultiIndex. Parameters ---------- tuples : list / sequence of tuple-likes Each tuple is the index of one row/column. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> tuples = [(1, 'red'), (1, 'blue'), ... (2, 'red'), (2, 'blue')] >>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color')) MultiIndex(levels=[[1, 2], ['blue', 'red']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['number', 'color']) """
if not is_list_like(tuples):
    raise TypeError('Input must be a list / sequence of tuple-likes.')
elif is_iterator(tuples):
    tuples = list(tuples)

if len(tuples) == 0:
    if names is None:
        msg = 'Cannot infer number of levels from empty list'
        raise TypeError(msg)
    arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
    if isinstance(tuples, Index):
        tuples = tuples._values

    arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
    arrays = list(lib.to_object_array_tuples(tuples).T)
else:
    arrays = lzip(*tuples)

return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
<SYSTEM_TASK:> Make a MultiIndex from the cartesian product of multiple iterables. <END_TASK> <USER_TASK:> Description: def from_product(cls, iterables, sortorder=None, names=None): """ Make a MultiIndex from the cartesian product of multiple iterables. Parameters ---------- iterables : list / sequence of iterables Each iterable has unique labels for each level of the index. sortorder : int or None Level of sortedness (must be lexicographically sorted by that level). names : list / sequence of str, optional Names for the levels in the index. Returns ------- index : MultiIndex See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_frame : Make a MultiIndex from a DataFrame. Examples -------- >>> numbers = [0, 1, 2] >>> colors = ['green', 'purple'] >>> pd.MultiIndex.from_product([numbers, colors], ... names=['number', 'color']) MultiIndex(levels=[[0, 1, 2], ['green', 'purple']], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]], names=['number', 'color']) """
from pandas.core.arrays.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product

if not is_list_like(iterables):
    raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
    iterables = list(iterables)

codes, levels = _factorize_from_iterables(iterables)
codes = cartesian_product(codes)
return MultiIndex(levels, codes, sortorder=sortorder, names=names)
<SYSTEM_TASK:> Make a MultiIndex from a DataFrame. <END_TASK> <USER_TASK:> Description: def from_frame(cls, df, sortorder=None, names=None): """ Make a MultiIndex from a DataFrame. .. versionadded:: 0.24.0 Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'], ... ['NJ', 'Temp'], ['NJ', 'Precip']], ... columns=['a', 'b']) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=['state', 'observation']) MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']], codes=[[0, 0, 1, 1], [1, 0, 1, 0]], names=['state', 'observation']) """
if not isinstance(df, ABCDataFrame):
    raise TypeError("Input must be a DataFrame")

column_names, columns = lzip(*df.iteritems())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
<SYSTEM_TASK:> Set new levels on MultiIndex. Defaults to returning <END_TASK> <USER_TASK:> Description: def set_levels(self, levels, level=None, inplace=False, verify_integrity=True): """ Set new levels on MultiIndex. Defaults to returning new index. Parameters ---------- levels : sequence or list of sequence new level(s) to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level=0) MultiIndex(levels=[['a', 'b'], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels(['a','b'], level='bar') MultiIndex(levels=[[1, 2], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_levels([['a','b'], [1,2]], level=[0,1]) MultiIndex(levels=[['a', 'b'], [1, 2]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['foo', 'bar']) """
if is_list_like(levels) and not isinstance(levels, Index):
    levels = list(levels)

if level is not None and not is_list_like(level):
    if not is_list_like(levels):
        raise TypeError("Levels must be list-like")
    if is_list_like(levels[0]):
        raise TypeError("Levels must be list-like")
    level = [level]
    levels = [levels]
elif level is None or is_list_like(level):
    if not is_list_like(levels) or not is_list_like(levels[0]):
        raise TypeError("Levels must be list of lists-like")

if inplace:
    idx = self
else:
    idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
                verify_integrity=verify_integrity)
if not inplace:
    return idx
<SYSTEM_TASK:> Set new codes on MultiIndex. Defaults to returning <END_TASK> <USER_TASK:> Description: def set_codes(self, codes, level=None, inplace=False, verify_integrity=True): """ Set new codes on MultiIndex. Defaults to returning new index. .. versionadded:: 0.24.0 New name for deprecated method `set_labels`. Parameters ---------- codes : sequence or list of sequence new codes to apply level : int, level name, or sequence of int/level names (default None) level(s) to set (None for all levels) inplace : bool if True, mutates in place verify_integrity : bool (default True) if True, checks that levels and codes are compatible Returns ------- new index (of same type and class...etc) Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([1,0,1,0], level=0) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 1, 0, 1]], names=['foo', 'bar']) >>> idx.set_codes([0,0,1,1], level='bar') MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 1, 1], [0, 0, 1, 1]], names=['foo', 'bar']) >>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1]) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[1, 0, 1, 0], [0, 0, 1, 1]], names=['foo', 'bar']) """
if level is not None and not is_list_like(level):
    if not is_list_like(codes):
        raise TypeError("Codes must be list-like")
    if is_list_like(codes[0]):
        raise TypeError("Codes must be list-like")
    level = [level]
    codes = [codes]
elif level is None or is_list_like(level):
    if not is_list_like(codes) or not is_list_like(codes[0]):
        raise TypeError("Codes must be list of lists-like")

if inplace:
    idx = self
else:
    idx = self._shallow_copy()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
    return idx
<SYSTEM_TASK:> Make a copy of this object. Names, dtype, levels and codes can be <END_TASK> <USER_TASK:> Description: def copy(self, names=None, dtype=None, levels=None, codes=None, deep=False, _set_identity=False, **kwargs): """ Make a copy of this object. Names, dtype, levels and codes can be passed and will be set on new copy. Parameters ---------- names : sequence, optional dtype : numpy dtype or pandas type, optional levels : sequence, optional codes : sequence, optional Returns ------- copy : MultiIndex Notes ----- In most cases, there should be no functional difference from using ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. """
name = kwargs.get('name')
names = self._validate_names(name=name, names=names, deep=deep)

if deep:
    from copy import deepcopy
    if levels is None:
        levels = deepcopy(self.levels)
    if codes is None:
        codes = deepcopy(self.codes)
else:
    if levels is None:
        levels = self.levels
    if codes is None:
        codes = self.codes
return MultiIndex(levels=levels, codes=codes, names=names,
                  sortorder=self.sortorder, verify_integrity=False,
                  _set_identity=_set_identity)
<SYSTEM_TASK:> this is defined as a copy with the same identity <END_TASK> <USER_TASK:> Description: def view(self, cls=None): """ this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
<SYSTEM_TASK:> return a boolean if we need a qualified .info display <END_TASK> <USER_TASK:> Description: def _is_memory_usage_qualified(self): """ return a boolean if we need a qualified .info display """
def f(l):
    return 'mixed' in l or 'string' in l or 'unicode' in l
return any(f(l) for l in self._inferred_type_levels)
<SYSTEM_TASK:> return the number of bytes in the underlying data <END_TASK> <USER_TASK:> Description: def _nbytes(self, deep=False): """ return the number of bytes in the underlying data deeply introspect the level data if deep=True; include the engine hashtable *this is an internal routine* """
# for implementations with no useful getsizeof (PyPy)
objsize = 24

level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes

# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
<SYSTEM_TASK:> validate and return the hash for the provided key <END_TASK> <USER_TASK:> Description: def _hashed_indexing_key(self, key): """ validate and return the hash for the provided key *this is internal for use for the cython routines* Parameters ---------- key : string or tuple Returns ------- np.uint64 Notes ----- we need to stringify if we have mixed levels """
from pandas.core.util.hashing import hash_tuples, hash_tuple

if not isinstance(key, tuple):
    return hash_tuples(key)

if not len(key) == self.nlevels:
    raise KeyError

def f(k, stringify):
    if stringify and not isinstance(k, str):
        k = str(k)
    return k

key = tuple(f(k, stringify)
            for k, stringify in zip(key, self._have_mixed_levels))
return hash_tuple(key)
<SYSTEM_TASK:> Return vector of label values for requested level, <END_TASK> <USER_TASK:> Description: def _get_level_values(self, level, unique=False): """ Return vector of label values for requested level, equal to the length of the index **this is an internal method** Parameters ---------- level : int level unique : bool, default False if True, drop duplicated values Returns ------- values : ndarray """
values = self.levels[level]
level_codes = self.codes[level]
if unique:
    level_codes = algos.unique(level_codes)
filled = algos.take_1d(values._values, level_codes,
                       fill_value=values._na_value)
values = values._shallow_copy(filled)
return values
<SYSTEM_TASK:> Return vector of label values for requested level, <END_TASK> <USER_TASK:> Description: def get_level_values(self, level): """ Return vector of label values for requested level, equal to the length of the index. Parameters ---------- level : int or str ``level`` is either the integer position of the level in the MultiIndex, or the name of the level. Returns ------- values : Index Values is a level of this MultiIndex converted to a single :class:`Index` (or subclass thereof). Examples --------- Create a MultiIndex: >>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def'))) >>> mi.names = ['level_1', 'level_2'] Get level values by supplying level as either integer or name: >>> mi.get_level_values(0) Index(['a', 'b', 'c'], dtype='object', name='level_1') >>> mi.get_level_values('level_2') Index(['d', 'e', 'f'], dtype='object', name='level_2') """
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
<SYSTEM_TASK:> Create a DataFrame with the levels of the MultiIndex as columns. <END_TASK> <USER_TASK:> Description: def to_frame(self, index=True, name=None): """ Create a DataFrame with the levels of the MultiIndex as columns. Column ordering is determined by the DataFrame constructor with data as a dict. .. versionadded:: 0.24.0 Parameters ---------- index : boolean, default True Set the index of the returned DataFrame as the original MultiIndex. name : list / sequence of strings, optional The passed names should substitute index level names. Returns ------- DataFrame : a DataFrame containing the original MultiIndex data. See Also -------- DataFrame """
from pandas import DataFrame
if name is not None:
    if not is_list_like(name):
        raise TypeError("'name' must be a list / sequence "
                        "of column names.")

    if len(name) != len(self.levels):
        raise ValueError("'name' should have same length as "
                         "number of levels on index.")
    idx_names = name
else:
    idx_names = self.names

# Guarantee resulting column order
result = DataFrame(
    OrderedDict([
        ((level if lvlname is None else lvlname),
         self._get_level_values(level))
        for lvlname, level in zip(idx_names, range(len(self.levels)))
    ]),
    copy=False
)

if index:
    result.index = self
return result
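A hedged sketch of to_frame; the level names and the name= override are made-up values:

import pandas as pd

mi = pd.MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']],
                               names=['num', 'let'])
# index=False keeps a default RangeIndex; name= replaces the column names
print(mi.to_frame(index=False, name=['number', 'letter']))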
<SYSTEM_TASK:> Return a MultiIndex reshaped to conform to the <END_TASK> <USER_TASK:> Description: def to_hierarchical(self, n_repeat, n_shuffle=1): """ Return a MultiIndex reshaped to conform to the shapes given by n_repeat and n_shuffle. .. deprecated:: 0.24.0 Useful to replicate and rearrange a MultiIndex for combination with another Index with n_repeat items. Parameters ---------- n_repeat : int Number of times to repeat the labels on self n_shuffle : int Controls the reordering of the labels. If the result is going to be an inner level in a MultiIndex, n_shuffle will need to be greater than one. The size of each label must be divisible by n_shuffle. Returns ------- MultiIndex Examples -------- >>> idx = pd.MultiIndex.from_tuples([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')]) >>> idx.to_hierarchical(3) MultiIndex(levels=[[1, 2], ['one', 'two']], codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]]) """
levels = self.levels
codes = [np.repeat(level_codes, n_repeat) for
         level_codes in self.codes]
# Assumes that each level_codes is divisible by n_shuffle
codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes]
names = self.names
warnings.warn("Method .to_hierarchical is deprecated and will "
              "be removed in a future version",
              FutureWarning, stacklevel=2)
return MultiIndex(levels=levels, codes=codes, names=names)
<SYSTEM_TASK:> Create a new MultiIndex from the current that removes <END_TASK> <USER_TASK:> Description: def remove_unused_levels(self): """ Create a new MultiIndex from the current that removes unused levels, meaning that they are not expressed in the labels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. .. versionadded:: 0.20.0 Returns ------- MultiIndex Examples -------- >>> i = pd.MultiIndex.from_product([range(2), list('ab')]) MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> i[2:] MultiIndex(levels=[[0, 1], ['a', 'b']], codes=[[1, 1], [0, 1]]) The 0 from the first level is not represented and can be removed >>> i[2:].remove_unused_levels() MultiIndex(levels=[[1], ['a', 'b']], codes=[[0, 0], [0, 1]]) """
new_levels = []
new_codes = []

changed = False
for lev, level_codes in zip(self.levels, self.codes):

    # Since few levels are typically unused, bincount() is more
    # efficient than unique() - however it only accepts positive values
    # (and drops order):
    uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
    has_na = int(len(uniques) and (uniques[0] == -1))

    if len(uniques) != len(lev) + has_na:
        # We have unused levels
        changed = True

        # Recalculate uniques, now preserving order.
        # Can easily be cythonized by exploiting the already existing
        # "uniques" and stop parsing "level_codes" when all items
        # are found:
        uniques = algos.unique(level_codes)
        if has_na:
            na_idx = np.where(uniques == -1)[0]
            # Just ensure that -1 is in first position:
            uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]

        # codes get mapped from uniques to 0:len(uniques)
        # -1 (if present) is mapped to last position
        code_mapping = np.zeros(len(lev) + has_na)
        # ... and reassigned value -1:
        code_mapping[uniques] = np.arange(len(uniques)) - has_na

        level_codes = code_mapping[level_codes]

        # new levels are simple
        lev = lev.take(uniques[has_na:])

    new_levels.append(lev)
    new_codes.append(level_codes)

result = self._shallow_copy()

if changed:
    result._reset_identity()
    result._set_levels(new_levels, validate=False)
    result._set_codes(new_codes, validate=False)

return result
<SYSTEM_TASK:> Internal method to handle NA filling of take <END_TASK> <USER_TASK:> Description: def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=None): """ Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
    if (indices < -1).any():
        msg = ('When allow_fill=True and fill_value is not None, '
               'all indices must be >= -1')
        raise ValueError(msg)
    taken = [lab.take(indices) for lab in self.codes]
    mask = indices == -1
    if mask.any():
        masked = []
        for new_label in taken:
            # the taken codes are plain ndarrays, so use them directly
            label_values = new_label
            label_values[mask] = na_value
            masked.append(np.asarray(label_values))
        taken = masked
else:
    taken = [lab.take(indices) for lab in self.codes]

return taken
<SYSTEM_TASK:> Append a collection of Index options together <END_TASK> <USER_TASK:> Description: def append(self, other): """ Append a collection of Index options together Parameters ---------- other : Index or list/tuple of indices Returns ------- appended : Index """
if not isinstance(other, (list, tuple)):
    other = [other]

if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
       for o in other):
    arrays = []
    for i in range(self.nlevels):
        label = self._get_level_values(i)
        appended = [o._get_level_values(i) for o in other]
        arrays.append(label.append(appended))
    return MultiIndex.from_arrays(arrays, names=self.names)

to_concat = (self.values, ) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)

# if all(isinstance(x, MultiIndex) for x in other):
try:
    return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
    return Index(new_tuples)
<SYSTEM_TASK:> Make new MultiIndex with passed list of codes deleted <END_TASK> <USER_TASK:> Description: def drop(self, codes, level=None, errors='raise'): """ Make new MultiIndex with passed list of codes deleted Parameters ---------- codes : array-like Must be a list of tuples level : int or level name, default None Returns ------- dropped : MultiIndex """
if level is not None:
    return self._drop_from_level(codes, level)

try:
    if not isinstance(codes, (np.ndarray, Index)):
        codes = com.index_labels_to_array(codes)
    indexer = self.get_indexer(codes)
    mask = indexer == -1
    if mask.any():
        if errors != 'ignore':
            raise ValueError('codes %s not contained in axis'
                             % codes[mask])
except Exception:
    pass

inds = []
for level_codes in codes:
    try:
        loc = self.get_loc(level_codes)
        # get_loc returns either an integer, a slice, or a boolean
        # mask
        if isinstance(loc, int):
            inds.append(loc)
        elif isinstance(loc, slice):
            inds.extend(lrange(loc.start, loc.stop))
        elif com.is_bool_indexer(loc):
            if self.lexsort_depth == 0:
                warnings.warn('dropping on a non-lexsorted multi-index'
                              ' without a level parameter may impact '
                              'performance.',
                              PerformanceWarning,
                              stacklevel=3)
            loc = loc.nonzero()[0]
            inds.extend(loc)
        else:
            msg = 'unsupported indexer of type {}'.format(type(loc))
            raise AssertionError(msg)
    except KeyError:
        if errors != 'ignore':
            raise

return self.delete(inds)
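A hedged sketch of drop: passing tuples removes whole rows, while level= removes every row whose label matches on that level. The example index is made up:

import pandas as pd

mi = pd.MultiIndex.from_tuples([(1, 'red'), (1, 'blue'),
                                (2, 'red'), (2, 'blue')])
print(mi.drop([(1, 'red')]))      # drop a single tuple
print(mi.drop(['red'], level=1))  # drop all rows labelled 'red' on level 1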
<SYSTEM_TASK:> Swap level i with level j. <END_TASK> <USER_TASK:> Description: def swaplevel(self, i=-2, j=-1): """ Swap level i with level j. Calling this method does not change the ordering of the values. Parameters ---------- i : int, str, default -2 First level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. j : int, str, default -1 Second level of index to be swapped. Can pass level name as string. Type of parameters can be mixed. Returns ------- MultiIndex A new MultiIndex. .. versionchanged:: 0.18.1 The indexes ``i`` and ``j`` are now optional, and default to the two innermost levels of the index. See Also -------- Series.swaplevel : Swap levels i and j in a MultiIndex. Dataframe.swaplevel : Swap levels i and j in a MultiIndex on a particular axis. Examples -------- >>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], ... codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi MultiIndex(levels=[['a', 'b'], ['bb', 'aa']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]) >>> mi.swaplevel(0, 1) MultiIndex(levels=[['bb', 'aa'], ['a', 'b']], codes=[[0, 1, 0, 1], [0, 0, 1, 1]]) """
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)

i = self._get_level_number(i)
j = self._get_level_number(j)

new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]

return MultiIndex(levels=new_levels, codes=new_codes,
                  names=new_names, verify_integrity=False)
<SYSTEM_TASK:> Rearrange levels using input order. May not drop or duplicate levels <END_TASK> <USER_TASK:> Description: def reorder_levels(self, order): """ Rearrange levels using input order. May not drop or duplicate levels. Parameters ---------- order : list of int or list of str The new level order; levels may be referenced by number (position) or by name. """
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
    raise AssertionError('Length of order must be same as '
                         'number of levels (%d), got %d' %
                         (self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]

return MultiIndex(levels=new_levels, codes=new_codes,
                  names=new_names, verify_integrity=False)
<SYSTEM_TASK:> Sort MultiIndex at the requested level. The result will respect the <END_TASK> <USER_TASK:> Description: def sortlevel(self, level=0, ascending=True, sort_remaining=True): """ Sort MultiIndex at the requested level. The result will respect the original ordering of the associated factor at that level. Parameters ---------- level : list-like, int or str, default 0 If a string is given, must be a name of the level If list-like must be names or ints of levels. ascending : boolean, default True False to sort in descending order Can also be a list to specify a directed ordering sort_remaining : sort by the remaining levels after level Returns ------- sorted_index : pd.MultiIndex Resulting index. indexer : np.ndarray Indices of output values in original index. """
from pandas.core.sorting import indexer_from_factorized

if isinstance(level, (str, int)):
    level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None

# we have a directed ordering via ascending
if isinstance(ascending, list):
    if not len(level) == len(ascending):
        raise ValueError("level must have same length as ascending")

    from pandas.core.sorting import lexsort_indexer
    indexer = lexsort_indexer([self.codes[lev] for lev in level],
                              orders=ascending)

# level ordering
else:

    codes = list(self.codes)
    shape = list(self.levshape)

    # partition codes and shape
    primary = tuple(codes[lev] for lev in level)
    primshp = tuple(shape[lev] for lev in level)

    # Reverse sorted to retain the order of
    # smaller indices that needs to be removed
    for lev in sorted(level, reverse=True):
        codes.pop(lev)
        shape.pop(lev)

    if sort_remaining:
        primary += primary + tuple(codes)
        primshp += primshp + tuple(shape)
    else:
        sortorder = level[0]

    indexer = indexer_from_factorized(primary, primshp,
                                      compress=False)

    if not ascending:
        indexer = indexer[::-1]

indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]

new_index = MultiIndex(codes=new_codes, levels=self.levels,
                       names=self.names, sortorder=sortorder,
                       verify_integrity=False)

return new_index, indexer
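A hedged sketch of sortlevel, which returns both the sorted index and the indexer that produced it; the example tuples are assumptions:

import pandas as pd

mi = pd.MultiIndex.from_tuples([(2, 'b'), (1, 'a'), (2, 'a'), (1, 'b')])
sorted_mi, indexer = mi.sortlevel(level=0)
print(sorted_mi)
print(indexer)  # positions of the sorted rows in the original index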
<SYSTEM_TASK:> For an ordered MultiIndex, compute the slice locations for input <END_TASK> <USER_TASK:> Description: def slice_locs(self, start=None, end=None, step=None, kind=None): """ For an ordered MultiIndex, compute the slice locations for input labels. The input labels can be tuples representing partial levels, e.g. for a MultiIndex with 3 levels, you can pass a single value (corresponding to the first level), or a 1-, 2-, or 3-tuple. Parameters ---------- start : label or tuple, default None If None, defaults to the beginning end : label or tuple If None, defaults to the end step : int or None Slice step kind : string, optional, defaults None Returns ------- (start, end) : (int, int) Notes ----- This method only works if the MultiIndex is properly lexsorted. So, if only the first 2 levels of a 3-level MultiIndex are lexsorted, you can only pass two levels to ``.slice_locs``. Examples -------- >>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')], ... names=['A', 'B']) Get the slice locations from the beginning of 'b' in the first level until the end of the multiindex: >>> mi.slice_locs(start='b') (1, 4) Like above, but stop at the end of 'b' in the first level and 'f' in the second level: >>> mi.slice_locs(start='b', end=('b', 'f')) (1, 3) See Also -------- MultiIndex.get_loc : Get location for a label or a tuple of labels. MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super().slice_locs(start, end, step, kind=kind)
<SYSTEM_TASK:> Get location for a label or a tuple of labels as an integer, slice or <END_TASK> <USER_TASK:> Description: def get_loc(self, key, method=None): """ Get location for a label or a tuple of labels as an integer, slice or boolean mask. Parameters ---------- key : label or tuple of labels (one for each level) method : None Returns ------- loc : int, slice object or boolean mask If the key is past the lexsort depth, the return may be a boolean mask array, otherwise it is always a slice or int. Examples --------- >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')]) >>> mi.get_loc('b') slice(1, 3, None) >>> mi.get_loc(('b', 'e')) 1 Notes ------ The key cannot be a slice, list of same-level labels, a boolean mask, or a sequence of such. If you want to use those, use :meth:`MultiIndex.get_locs` instead. See Also -------- Index.get_loc : The get_loc method for (single-level) index. MultiIndex.slice_locs : Get slice location given start label(s) and end label(s). MultiIndex.get_locs : Get location for a label/slice/list/mask or a sequence of such. """
if method is not None: raise NotImplementedError('only the default get_loc method is ' 'currently supported for MultiIndex') def _maybe_to_slice(loc): """convert integer indexer to boolean mask or slice if possible""" if not isinstance(loc, np.ndarray) or loc.dtype != 'int64': return loc loc = lib.maybe_indices_to_slice(loc, len(self)) if isinstance(loc, slice): return loc mask = np.empty(len(self), dtype='bool') mask.fill(False) mask[loc] = True return mask if not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) return _maybe_to_slice(loc) keylen = len(key) if self.nlevels < keylen: raise KeyError('Key length ({0}) exceeds index depth ({1})' ''.format(keylen, self.nlevels)) if keylen == self.nlevels and self.is_unique: return self._engine.get_loc(key) # -- partial selection or non-unique index # break the key into 2 parts based on the lexsort_depth of the index; # the first part returns a continuous slice of the index; the 2nd part # needs linear search within the slice i = self.lexsort_depth lead_key, follow_key = key[:i], key[i:] start, stop = (self.slice_locs(lead_key, lead_key) if lead_key else (0, len(self))) if start == stop: raise KeyError(key) if not follow_key: return slice(start, stop) warnings.warn('indexing past lexsort depth may impact performance.', PerformanceWarning, stacklevel=10) loc = np.arange(start, stop, dtype='int64') for i, k in enumerate(follow_key, len(lead_key)): mask = self.codes[i][loc] == self.levels[i].get_loc(k) if not mask.all(): loc = loc[mask] if not len(loc): raise KeyError(key) return (_maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop))
<SYSTEM_TASK:> Return True if the levels of both MultiIndex objects are the same <END_TASK> <USER_TASK:> Description: def equal_levels(self, other): """ Return True if the levels of both MultiIndex objects are the same """
if self.nlevels != other.nlevels: return False for i in range(self.nlevels): if not self.levels[i].equals(other.levels[i]): return False return True
<SYSTEM_TASK:> Form the union of two MultiIndex objects <END_TASK> <USER_TASK:> Description: def union(self, other, sort=None): """ Form the union of two MultiIndex objects Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default None Whether to sort the resulting Index. * None : Sort the result, except when 1. `self` and `other` are equal. 2. `self` has length 0. 3. Some values in `self` or `other` cannot be compared. A RuntimeWarning is issued in this case. * False : do not sort the result. .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- Index Examples -------- >>> index.union(index2) """
self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0 or self.equals(other): return self # TODO: Index.union returns other when `len(self)` is 0. uniq_tuples = lib.fast_unique_multiple([self._ndarray_values, other._ndarray_values], sort=sort) return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
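A minimal illustration of the union above, using tiny hand-made indexes (hypothetical data):
>>> import pandas as pd
>>> left = pd.MultiIndex.from_tuples([('a', 1), ('a', 2)])
>>> right = pd.MultiIndex.from_tuples([('a', 2), ('b', 1)])
>>> list(left.union(right))
[('a', 1), ('a', 2), ('b', 1)]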
<SYSTEM_TASK:> Form the intersection of two MultiIndex objects. <END_TASK> <USER_TASK:> Description: def intersection(self, other, sort=False): """ Form the intersection of two MultiIndex objects. Parameters ---------- other : MultiIndex or array / Index of tuples sort : False or None, default False Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default from ``True`` to ``False``, to match behaviour from before 0.24.0 Returns ------- Index """
self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if self.equals(other): return self self_tuples = self._ndarray_values other_tuples = other._ndarray_values uniq_tuples = set(self_tuples) & set(other_tuples) if sort is None: uniq_tuples = sorted(uniq_tuples) if len(uniq_tuples) == 0: return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0, names=result_names)
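A small example of the intersection above (hypothetical data); sorted() is applied because with the default sort=False the order comes from a Python set in this implementation and is not guaranteed:
>>> import pandas as pd
>>> left = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
>>> right = pd.MultiIndex.from_tuples([('a', 2), ('b', 1), ('c', 3)])
>>> sorted(left.intersection(right))
[('a', 2), ('b', 1)]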
<SYSTEM_TASK:> Compute set difference of two MultiIndex objects <END_TASK> <USER_TASK:> Description: def difference(self, other, sort=None): """ Compute set difference of two MultiIndex objects Parameters ---------- other : MultiIndex sort : False or None, default None Sort the resulting MultiIndex if possible .. versionadded:: 0.24.0 .. versionchanged:: 0.24.1 Changed the default value from ``True`` to ``None`` (without change in behaviour). Returns ------- diff : MultiIndex """
self._validate_sort_keyword(sort) self._assert_can_do_setop(other) other, result_names = self._convert_can_do_setop(other) if len(other) == 0: return self if self.equals(other): return MultiIndex(levels=self.levels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) this = self._get_unique_index() indexer = this.get_indexer(other) indexer = indexer.take((indexer != -1).nonzero()[0]) label_diff = np.setdiff1d(np.arange(this.size), indexer, assume_unique=True) difference = this.values.take(label_diff) if sort is None: difference = sorted(difference) if len(difference) == 0: return MultiIndex(levels=[[]] * self.nlevels, codes=[[]] * self.nlevels, names=result_names, verify_integrity=False) else: return MultiIndex.from_tuples(difference, sortorder=0, names=result_names)
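A quick sketch of the set difference above with made-up data; with the default sort=None the result is sorted:
>>> import pandas as pd
>>> left = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
>>> right = pd.MultiIndex.from_tuples([('a', 2)])
>>> list(left.difference(right))
[('a', 1), ('b', 1)]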
<SYSTEM_TASK:> Make new index with passed location deleted <END_TASK> <USER_TASK:> Description: def delete(self, loc): """ Make new index with passed location deleted Returns ------- new_index : MultiIndex """
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes] return MultiIndex(levels=self.levels, codes=new_codes, names=self.names, verify_integrity=False)
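Since the codes are dropped with np.delete, loc can be a single position or a list of positions; a small illustration with hypothetical data:
>>> import pandas as pd
>>> mi = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)])
>>> list(mi.delete(0))
[('a', 2), ('b', 1)]
>>> list(mi.delete([0, 2]))
[('a', 2)]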
<SYSTEM_TASK:> routine to ensure that our data is of the correct <END_TASK> <USER_TASK:> Description: def _ensure_data(values, dtype=None): """ routine to ensure that our data is of the correct input dtype for lower-level routines This will coerce: - ints -> int64 - uint -> uint64 - bool -> uint64 (TODO this should be uint8) - datetimelike -> i8 - datetime64tz -> i8 (in local tz) - categorical -> codes Parameters ---------- values : array-like dtype : pandas_dtype, optional coerce to this dtype Returns ------- (ndarray, pandas_dtype, algo dtype as a string) """
# we check some simple dtypes first try: if is_object_dtype(dtype): return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): return ensure_float64(values), 'float64', 'float64' elif is_object_dtype(values) and dtype is None: return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(): simplefilter("ignore", np.ComplexWarning) values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError, OverflowError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or is_period_dtype(dtype) or is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype)): if is_period_dtype(values) or is_period_dtype(dtype): from pandas import PeriodIndex values = PeriodIndex(values) dtype = values.dtype elif is_timedelta64_dtype(values) or is_timedelta64_dtype(dtype): from pandas import TimedeltaIndex values = TimedeltaIndex(values) dtype = values.dtype else: # Datetime from pandas import DatetimeIndex values = DatetimeIndex(values) dtype = values.dtype return values.asi8, dtype, 'int64' elif (is_categorical_dtype(values) and (is_categorical_dtype(dtype) or dtype is None)): values = getattr(values, 'values', values) values = values.codes dtype = 'category' # we are actually coercing to int64 # until our algos support int* directly (not all do) values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values, dtype=np.object) return ensure_object(values), 'object', 'object'
<SYSTEM_TASK:> reverse of _ensure_data <END_TASK> <USER_TASK:> Description: def _reconstruct_data(values, dtype, original): """ reverse of _ensure_data Parameters ---------- values : ndarray dtype : pandas_dtype original : ndarray-like Returns ------- Index for extension types, otherwise ndarray casted to dtype """
from pandas import Index if is_extension_array_dtype(dtype): values = dtype.construct_array_type()._from_sequence(values) elif is_datetime64tz_dtype(dtype) or is_period_dtype(dtype): values = Index(original)._shallow_copy(values, name=None) elif is_bool_dtype(dtype): values = values.astype(dtype) # we only support object dtypes bool Index if isinstance(original, Index): values = values.astype(object) elif dtype is not None: values = values.astype(dtype) return values
<SYSTEM_TASK:> ensure that we are arraylike if not already <END_TASK> <USER_TASK:> Description: def _ensure_arraylike(values): """ ensure that we are arraylike if not already """
if not is_array_like(values): inferred = lib.infer_dtype(values, skipna=False) if inferred in ['mixed', 'string', 'unicode']: if isinstance(values, tuple): values = list(values) values = construct_1d_object_array_from_listlike(values) else: values = np.asarray(values) return values
<SYSTEM_TASK:> Compute locations of to_match into values <END_TASK> <USER_TASK:> Description: def match(to_match, values, na_sentinel=-1): """ Compute locations of to_match into values Parameters ---------- to_match : array-like values to find positions of values : array-like Unique set of values na_sentinel : int, default -1 Value to mark "not found" Returns ------- match : ndarray of integers """
values = com.asarray_tuplesafe(values) htable, _, values, dtype, ndtype = _get_hashtable_algo(values) to_match, _, _ = _ensure_data(to_match, dtype) table = htable(min(len(to_match), 1000000)) table.map_locations(values) result = table.lookup(to_match) if na_sentinel != -1: # replace but return a numpy array # use a Series because it handles dtype conversions properly from pandas import Series result = Series(result.ravel()).replace(-1, na_sentinel) result = result.values.reshape(result.shape) return result
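match itself is an internal helper; the same lookup is available through the public Index.get_indexer, which likewise marks misses with -1. A hypothetical example of the equivalent public call:
>>> import pandas as pd
>>> list(pd.Index(['b', 'c', 'a']).get_indexer(['a', 'x', 'b']))
[2, -1, 0]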
<SYSTEM_TASK:> Hash table-based unique. Uniques are returned in order <END_TASK> <USER_TASK:> Description: def unique(values): """ Hash table-based unique. Uniques are returned in order of appearance. This does NOT sort. Significantly faster than numpy.unique. Includes NA values. Parameters ---------- values : 1d array-like Returns ------- numpy.ndarray or ExtensionArray The return can be: * Index : when the input is an Index * Categorical : when the input is a Categorical dtype * ndarray : when the input is a Series/ndarray Return numpy.ndarray or ExtensionArray. See Also -------- Index.unique Series.unique Examples -------- >>> pd.unique(pd.Series([2, 1, 3, 3])) array([2, 1, 3]) >>> pd.unique(pd.Series([2] + [1] * 5)) array([2, 1]) >>> pd.unique(pd.Series([pd.Timestamp('20160101'), ... pd.Timestamp('20160101')])) array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') >>> pd.unique(pd.Series([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) array([Timestamp('2016-01-01 00:00:00-0500', tz='US/Eastern')], dtype=object) >>> pd.unique(pd.Index([pd.Timestamp('20160101', tz='US/Eastern'), ... pd.Timestamp('20160101', tz='US/Eastern')])) DatetimeIndex(['2016-01-01 00:00:00-05:00'], ... dtype='datetime64[ns, US/Eastern]', freq=None) >>> pd.unique(list('baabc')) array(['b', 'a', 'c'], dtype=object) An unordered Categorical will return categories in the order of appearance. >>> pd.unique(pd.Series(pd.Categorical(list('baabc')))) [b, a, c] Categories (3, object): [b, a, c] >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc')))) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.unique(pd.Series(pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True))) [b, a, c] Categories (3, object): [a < b < c] An array of tuples >>> pd.unique([('a', 'b'), ('b', 'a'), ('a', 'c'), ('b', 'a')]) array([('a', 'b'), ('b', 'a'), ('a', 'c')], dtype=object) """
values = _ensure_arraylike(values) if is_extension_array_dtype(values): # Dispatch to extension dtype's unique. return values.unique() original = values htable, _, values, dtype, ndtype = _get_hashtable_algo(values) table = htable(len(values)) uniques = table.unique(values) uniques = _reconstruct_data(uniques, dtype, original) return uniques
<SYSTEM_TASK:> Compute the isin boolean array <END_TASK> <USER_TASK:> Description: def isin(comps, values): """ Compute the isin boolean array Parameters ---------- comps : array-like values : array-like Returns ------- boolean array same length as comps """
if not is_list_like(comps): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{comps_type}]" .format(comps_type=type(comps).__name__)) if not is_list_like(values): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]" .format(values_type=type(values).__name__)) if not isinstance(values, (ABCIndex, ABCSeries, np.ndarray)): values = construct_1d_object_array_from_listlike(list(values)) if is_categorical_dtype(comps): # TODO(extension) # handle categoricals return comps._values.isin(values) comps = com.values_from_object(comps) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) # faster for larger cases to use np.in1d f = lambda x, y: htable.ismember_object(x, values) # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception if len(comps) > 1000000 and not is_object_dtype(comps): f = lambda x, y: np.in1d(x, y) elif is_integer_dtype(comps): try: values = values.astype('int64', copy=False) comps = comps.astype('int64', copy=False) f = lambda x, y: htable.ismember_int64(x, y) except (TypeError, ValueError, OverflowError): values = values.astype(object) comps = comps.astype(object) elif is_float_dtype(comps): try: values = values.astype('float64', copy=False) comps = comps.astype('float64', copy=False) f = lambda x, y: htable.ismember_float64(x, y) except (TypeError, ValueError): values = values.astype(object) comps = comps.astype(object) return f(comps, values)
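This routine backs the public Series.isin; a small example with made-up data:
>>> import pandas as pd
>>> s = pd.Series(['apple', 'banana', 'cherry'])
>>> s.isin(['banana', 'durian'])
0    False
1     True
2    False
dtype: bool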
<SYSTEM_TASK:> Factorize an array-like to labels and uniques. <END_TASK> <USER_TASK:> Description: def _factorize_array(values, na_sentinel=-1, size_hint=None, na_value=None): """Factorize an array-like to labels and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray na_sentinel : int, default -1 size_hint : int, optional Passed through to the hashtable's 'factorize' method na_value : object, optional A value in `values` to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). Returns ------- labels, uniques : ndarray """
(hash_klass, _), values = _get_data_algo(values, _hashtables) table = hash_klass(size_hint or len(values)) uniques, labels = table.factorize(values, na_sentinel=na_sentinel, na_value=na_value) labels = ensure_platform_int(labels) return labels, uniques
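The public pd.factorize wrapper exposes this behaviour; a quick sketch with hypothetical data, outputs shown as lists to avoid platform-specific array reprs:
>>> import pandas as pd
>>> codes, uniques = pd.factorize(['b', 'a', 'b', 'c'])
>>> list(codes)
[0, 1, 0, 2]
>>> list(uniques)
['b', 'a', 'c']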
<SYSTEM_TASK:> Compute a histogram of the counts of non-null values. <END_TASK> <USER_TASK:> Description: def value_counts(values, sort=True, ascending=False, normalize=False, bins=None, dropna=True): """ Compute a histogram of the counts of non-null values. Parameters ---------- values : ndarray (1-d) sort : boolean, default True Sort by values ascending : boolean, default False Sort in ascending order normalize: boolean, default False If True then compute a relative histogram bins : integer, optional Rather than count values, group them into half-open bins, convenience for pd.cut, only works with numeric data dropna : boolean, default True Don't include counts of NaN Returns ------- value_counts : Series """
from pandas.core.series import Series, Index name = getattr(values, 'name', None) if bins is not None: try: from pandas.core.reshape.tile import cut values = Series(values) ii = cut(values, bins, include_lowest=True) except TypeError: raise TypeError("bins argument only works with numeric data.") # count, remove nulls (from the index), and but the bins result = ii.value_counts(dropna=dropna) result = result[result.index.notna()] result.index = result.index.astype('interval') result = result.sort_index() # if we are dropna and we have NO values if dropna and (result.values == 0).all(): result = result.iloc[0:0] # normalizing is by len of all (regardless of dropna) counts = np.array([len(ii)]) else: if is_extension_array_dtype(values) or is_sparse(values): # handle Categorical and sparse, result = Series(values)._values.value_counts(dropna=dropna) result.name = name counts = result.values else: keys, counts = _value_counts_arraylike(values, dropna) if not isinstance(keys, Index): keys = Index(keys) result = Series(counts, index=keys, name=name) if sort: result = result.sort_values(ascending=ascending) if normalize: result = result / float(counts.sum()) return result
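A short example of the counting and normalizing paths via the public Series.value_counts (hypothetical data):
>>> import pandas as pd
>>> s = pd.Series(['a', 'a', 'a', 'b'])
>>> s.value_counts()
a    3
b    1
dtype: int64
>>> s.value_counts(normalize=True)
a    0.75
b    0.25
dtype: float64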
<SYSTEM_TASK:> Return boolean ndarray denoting duplicate values. <END_TASK> <USER_TASK:> Description: def duplicated(values, keep='first'): """ Return boolean ndarray denoting duplicate values. .. versionadded:: 0.19.0 Parameters ---------- values : ndarray-like Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : ndarray """
values, dtype, ndtype = _ensure_data(values) f = getattr(htable, "duplicated_{dtype}".format(dtype=ndtype)) return f(values, keep=keep)
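A small illustration of the keep='first' default versus keep='last' through the public Series.duplicated (made-up data, outputs shown as lists):
>>> import pandas as pd
>>> s = pd.Series(['a', 'b', 'a', 'c', 'a'])
>>> list(s.duplicated())
[False, False, True, False, True]
>>> list(s.duplicated(keep='last'))
[True, False, True, False, False]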
<SYSTEM_TASK:> Rank the values along a given axis. <END_TASK> <USER_TASK:> Description: def rank(values, axis=0, method='average', na_option='keep', ascending=True, pct=False): """ Rank the values along a given axis. Parameters ---------- values : array-like Array whose values will be ranked. The number of dimensions in this array must not exceed 2. axis : int, default 0 Axis over which to perform rankings. method : {'average', 'min', 'max', 'first', 'dense'}, default 'average' The method by which tiebreaks are broken during the ranking. na_option : {'keep', 'top'}, default 'keep' The method by which NaNs are placed in the ranking. - ``keep``: rank each NaN value with a NaN ranking - ``top``: replace each NaN with either +/- inf so that they are ranked at the top ascending : boolean, default True Whether or not the elements should be ranked in ascending order. pct : boolean, default False Whether or not to display the returned rankings in integer form (e.g. 1, 2, 3) or in percentile form (e.g. 0.333..., 0.666..., 1). """
if values.ndim == 1: f, values = _get_data_algo(values, _rank1d_functions) ranks = f(values, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) elif values.ndim == 2: f, values = _get_data_algo(values, _rank2d_functions) ranks = f(values, axis=axis, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) else: raise TypeError("Array with ndim > 2 are not supported.") return ranks
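A quick sketch of the tie-breaking methods through the public Series.rank (hypothetical data, outputs shown as lists):
>>> import pandas as pd
>>> s = pd.Series([7, 1, 7, 3])
>>> list(s.rank(method='average'))
[3.5, 1.0, 3.5, 2.0]
>>> list(s.rank(method='dense'))
[3.0, 1.0, 3.0, 2.0]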
<SYSTEM_TASK:> Perform array addition that checks for underflow and overflow. <END_TASK> <USER_TASK:> Description: def checked_add_with_arr(arr, b, arr_mask=None, b_mask=None): """ Perform array addition that checks for underflow and overflow. Performs the addition of an int64 array and an int64 integer (or array) but checks that they do not result in overflow first. For elements that are indicated to be NaN, whether or not there is overflow for that element is automatically ignored. Parameters ---------- arr : array addend. b : array or scalar addend. arr_mask : boolean array or None array indicating which elements to exclude from checking b_mask : boolean array or boolean or None array or scalar indicating which element(s) to exclude from checking Returns ------- sum : An array for elements x + b for each element x in arr if b is a scalar or an array for elements x + y for each element pair (x, y) in (arr, b). Raises ------ OverflowError if any x + y exceeds the maximum or minimum int64 value. """
# For performance reasons, we broadcast 'b' to the new array 'b2' # so that it has the same size as 'arr'. b2 = np.broadcast_to(b, arr.shape) if b_mask is not None: # We do the same broadcasting for b_mask as well. b2_mask = np.broadcast_to(b_mask, arr.shape) else: b2_mask = None # For elements that are NaN, regardless of their value, we should # ignore whether they overflow or not when doing the checked add. if arr_mask is not None and b2_mask is not None: not_nan = np.logical_not(arr_mask | b2_mask) elif arr_mask is not None: not_nan = np.logical_not(arr_mask) elif b_mask is not None: not_nan = np.logical_not(b2_mask) else: not_nan = np.empty(arr.shape, dtype=bool) not_nan.fill(True) # gh-14324: For each element in 'arr' and its corresponding element # in 'b2', we check the sign of the element in 'b2'. If it is positive, # we then check whether its sum with the element in 'arr' exceeds # np.iinfo(np.int64).max. If so, we have an overflow error. If it # it is negative, we then check whether its sum with the element in # 'arr' exceeds np.iinfo(np.int64).min. If so, we have an overflow # error as well. mask1 = b2 > 0 mask2 = b2 < 0 if not mask1.any(): to_raise = ((np.iinfo(np.int64).min - b2 > arr) & not_nan).any() elif not mask2.any(): to_raise = ((np.iinfo(np.int64).max - b2 < arr) & not_nan).any() else: to_raise = (((np.iinfo(np.int64).max - b2[mask1] < arr[mask1]) & not_nan[mask1]).any() or ((np.iinfo(np.int64).min - b2[mask2] > arr[mask2]) & not_nan[mask2]).any()) if to_raise: raise OverflowError("Overflow in int64 addition") return arr + b
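A minimal sketch of the overflow check above; the import path is an assumption for this pandas version, since the helper is internal:
>>> import numpy as np
>>> from pandas.core.algorithms import checked_add_with_arr  # internal; path assumed
>>> checked_add_with_arr(np.array([1, 2], dtype=np.int64), 1)
array([2, 3])
>>> checked_add_with_arr(np.array([np.iinfo(np.int64).max], dtype=np.int64), 1)
Traceback (most recent call last):
    ...
OverflowError: Overflow in int64 addition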
<SYSTEM_TASK:> Compute sample quantile or quantiles of the input array. For example, q=0.5 <END_TASK> <USER_TASK:> Description: def quantile(x, q, interpolation_method='fraction'): """ Compute sample quantile or quantiles of the input array. For example, q=0.5 computes the median. The `interpolation_method` parameter supports three values, namely `fraction` (default), `lower` and `higher`. Interpolation is done only, if the desired quantile lies between two data points `i` and `j`. For `fraction`, the result is an interpolated value between `i` and `j`; for `lower`, the result is `i`, for `higher` the result is `j`. Parameters ---------- x : ndarray Values from which to extract score. q : scalar or array Percentile at which to extract score. interpolation_method : {'fraction', 'lower', 'higher'}, optional This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: - fraction: `i + (j - i)*fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. -lower: `i`. - higher: `j`. Returns ------- score : float Score at percentile. Examples -------- >>> from scipy import stats >>> a = np.arange(100) >>> stats.scoreatpercentile(a, 50) 49.5 """
x = np.asarray(x) mask = isna(x) x = x[~mask] values = np.sort(x) def _interpolate(a, b, fraction): """Returns the point at the given fraction between a and b, where 'fraction' must be between 0 and 1. """ return a + (b - a) * fraction def _get_score(at): if len(values) == 0: return np.nan idx = at * (len(values) - 1) if idx % 1 == 0: score = values[int(idx)] else: if interpolation_method == 'fraction': score = _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1) elif interpolation_method == 'lower': # cast to int: float positions are not valid ndarray indices score = values[int(np.floor(idx))] elif interpolation_method == 'higher': score = values[int(np.ceil(idx))] else: raise ValueError("interpolation_method can only be 'fraction' " ", 'lower' or 'higher'") return score if is_scalar(q): return _get_score(q) else: q = np.asarray(q, np.float64) return algos.arrmap_float64(q, _get_score)
<SYSTEM_TASK:> Convert a SparseSeries to a scipy.sparse.coo_matrix using index <END_TASK> <USER_TASK:> Description: def _sparse_series_to_coo(ss, row_levels=(0, ), column_levels=(1, ), sort_labels=False): """ Convert a SparseSeries to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels. """
import scipy.sparse if ss.index.nlevels < 2: raise ValueError('to_coo requires MultiIndex with nlevels >= 2') if not ss.index.is_unique: raise ValueError('Duplicate index entries are not allowed in to_coo ' 'transformation.') # to keep things simple, only rely on integer indexing (not labels) row_levels = [ss.index._get_level_number(x) for x in row_levels] column_levels = [ss.index._get_level_number(x) for x in column_levels] v, i, j, rows, columns = _to_ijv(ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels) sparse_matrix = scipy.sparse.coo_matrix( (v, (i, j)), shape=(len(rows), len(columns))) return sparse_matrix, rows, columns
<SYSTEM_TASK:> Convert a scipy.sparse.coo_matrix to a SparseSeries. <END_TASK> <USER_TASK:> Description: def _coo_to_sparse_series(A, dense_index=False): """ Convert a scipy.sparse.coo_matrix to a SparseSeries. Use the defaults given in the SparseSeries constructor. """
s = Series(A.data, MultiIndex.from_arrays((A.row, A.col))) s = s.sort_index() s = s.to_sparse() # TODO: specify kind? if dense_index: # is there a better constructor method to use here? i = range(A.shape[0]) j = range(A.shape[1]) ind = MultiIndex.from_product([i, j]) s = s.reindex(ind) return s
<SYSTEM_TASK:> Wrap comparison operations to convert datetime-like to datetime64 <END_TASK> <USER_TASK:> Description: def _dt_array_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """
opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented other = lib.item_from_zerodim(other) if isinstance(other, (datetime, np.datetime64, str)): if isinstance(other, (datetime, np.datetime64)): # GH#18435 strings get a pass from tzawareness compat self._assert_tzawareness_compat(other) try: other = _to_M8(other, tz=self.tz) except ValueError: # string that cannot be parsed to Timestamp return ops.invalid_comparison(self, other, op) result = op(self.asi8, other.view('i8')) if isna(other): result.fill(nat_result) elif lib.is_scalar(other) or np.ndim(other) == 0: return ops.invalid_comparison(self, other, op) elif len(other) != len(self): raise ValueError("Lengths must match") else: if isinstance(other, list): try: other = type(self)._from_sequence(other) except ValueError: other = np.array(other, dtype=np.object_) elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries, DatetimeArray)): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. return ops.invalid_comparison(self, other, op) if is_object_dtype(other): # We have to use _comp_method_OBJECT_ARRAY instead of numpy # comparison otherwise it would fail to raise when # comparing tz-aware and tz-naive with np.errstate(all='ignore'): result = ops._comp_method_OBJECT_ARRAY(op, self.astype(object), other) o_mask = isna(other) elif not (is_datetime64_dtype(other) or is_datetime64tz_dtype(other)): # e.g. is_timedelta64_dtype(other) return ops.invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) if isinstance(other, (ABCIndexClass, ABCSeries)): other = other.array if (is_datetime64_dtype(other) and not is_datetime64_ns_dtype(other) or not hasattr(other, 'asi8')): # e.g. other.dtype == 'datetime64[s]' # or an object-dtype ndarray other = type(self)._from_sequence(other) result = op(self.view('i8'), other.view('i8')) o_mask = other._isnan result = com.values_from_object(result) if o_mask.any(): result[o_mask] = nat_result if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
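The wrapper above is what lets datetime arrays and indexes be compared element-wise against strings and scalars; for instance, with hypothetical dates:
>>> import pandas as pd
>>> dti = pd.date_range('2019-01-01', periods=3, freq='D')
>>> dti > '2019-01-02'
array([False, False,  True])
>>> dti == pd.Timestamp('2019-01-02')
array([False,  True, False])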
<SYSTEM_TASK:> Convert data to array of timestamps. <END_TASK> <USER_TASK:> Description: def objects_to_datetime64ns(data, dayfirst, yearfirst, utc=False, errors="raise", require_iso8601=False, allow_object=False): """ Convert data to array of timestamps. Parameters ---------- data : np.ndarray[object] dayfirst : bool yearfirst : bool utc : bool, default False Whether to convert timezone-aware timestamps to UTC errors : {'raise', 'ignore', 'coerce'} allow_object : bool Whether to return an object-dtype ndarray instead of raising if the data contains more than one timezone. Returns ------- result : ndarray np.int64 dtype if returned values represent UTC timestamps np.datetime64[ns] if returned values represent wall times object if mixed timezones inferred_tz : tzinfo or None Raises ------ ValueError : if data cannot be converted to datetimes """
assert errors in ["raise", "ignore", "coerce"] # if str-dtype, convert data = np.array(data, copy=False, dtype=np.object_) try: result, tz_parsed = tslib.array_to_datetime( data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, require_iso8601=require_iso8601 ) except ValueError as e: try: values, tz_parsed = conversion.datetime_to_datetime64(data) # If tzaware, these values represent unix timestamps, so we # return them as i8 to distinguish from wall times return values.view('i8'), tz_parsed except (ValueError, TypeError): raise e if tz_parsed is not None: # We can take a shortcut since the datetime64 numpy array # is in UTC # Return i8 values to denote unix timestamps return result.view('i8'), tz_parsed elif is_datetime64_dtype(result): # returning M8[ns] denotes wall-times; since tz is None # the distinction is a thin one return result, tz_parsed elif is_object_dtype(result): # GH#23675 when called via `pd.to_datetime`, returning an object-dtype # array is allowed. When called via `pd.DatetimeIndex`, we can # only accept datetime64 dtype, so raise TypeError if object-dtype # is returned, as that indicates the values can be recognized as # datetimes but they have conflicting timezones/awareness if allow_object: return result, tz_parsed raise TypeError(result) else: # pragma: no cover # GH#23675 this TypeError should never be hit, whereas the TypeError # in the object-dtype branch above is reachable. raise TypeError(result)
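The public pd.to_datetime funnels object data through this routine; a small sketch with made-up strings, including the utc=True path for mixed offsets:
>>> import pandas as pd
>>> pd.to_datetime(['2019-01-01', '2019-01-02'])
DatetimeIndex(['2019-01-01', '2019-01-02'], dtype='datetime64[ns]', freq=None)
>>> pd.to_datetime(['2019-01-01 00:00:00+01:00',
...                 '2019-01-01 00:00:00+02:00'], utc=True).tz
<UTC>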
<SYSTEM_TASK:> Convert data based on dtype conventions, issuing deprecation warnings <END_TASK> <USER_TASK:> Description: def maybe_convert_dtype(data, copy): """ Convert data based on dtype conventions, issuing deprecation warnings or errors where appropriate. Parameters ---------- data : np.ndarray or pd.Index copy : bool Returns ------- data : np.ndarray or pd.Index copy : bool Raises ------ TypeError : PeriodDType data is passed """
if is_float_dtype(data): # Note: we must cast to datetime64[ns] here in order to treat these # as wall-times instead of UTC timestamps. data = data.astype(_NS_DTYPE) copy = False # TODO: deprecate this behavior to instead treat symmetrically # with integer dtypes. See discussion in GH#23675 elif is_timedelta64_dtype(data): warnings.warn("Passing timedelta64-dtype data is deprecated, will " "raise a TypeError in a future version", FutureWarning, stacklevel=5) data = data.view(_NS_DTYPE) elif is_period_dtype(data): # Note: without explicitly raising here, PeriodIndex # test_setops.test_join_does_not_recur fails raise TypeError("Passing PeriodDtype data is invalid. " "Use `data.to_timestamp()` instead") elif is_categorical_dtype(data): # GH#18664 preserve tz in going DTI->Categorical->DTI # TODO: cases where we need to do another pass through this func, # e.g. the categories are timedelta64s data = data.categories.take(data.codes, fill_value=NaT)._values copy = False elif is_extension_type(data) and not is_datetime64tz_dtype(data): # Includes categorical # TODO: We have no tests for these data = np.array(data, dtype=np.object_) copy = False return data, copy
<SYSTEM_TASK:> If a timezone is inferred from data, check that it is compatible with <END_TASK> <USER_TASK:> Description: def maybe_infer_tz(tz, inferred_tz): """ If a timezone is inferred from data, check that it is compatible with the user-provided timezone, if any. Parameters ---------- tz : tzinfo or None inferred_tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if both timezones are present but do not match """
if tz is None: tz = inferred_tz elif inferred_tz is None: pass elif not timezones.tz_compare(tz, inferred_tz): raise TypeError('data is already tz-aware {inferred_tz}, unable to ' 'set specified tz: {tz}' .format(inferred_tz=inferred_tz, tz=tz)) return tz
<SYSTEM_TASK:> If the given dtype is a DatetimeTZDtype, extract the implied <END_TASK> <USER_TASK:> Description: def validate_tz_from_dtype(dtype, tz): """ If the given dtype is a DatetimeTZDtype, extract the implied tzinfo object from it and check that it does not conflict with the given tz. Parameters ---------- dtype : dtype, str tz : None, tzinfo Returns ------- tz : consensus tzinfo Raises ------ ValueError : on tzinfo mismatch """
if dtype is not None: if isinstance(dtype, str): try: dtype = DatetimeTZDtype.construct_from_string(dtype) except TypeError: # Things like `datetime64[ns]`, which is OK for the # constructors, but also nonsense, which should be validated # but not by us. We *do* allow non-existent tz errors to # go through pass dtz = getattr(dtype, 'tz', None) if dtz is not None: if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a dtype" " with a tz") tz = dtz if tz is not None and is_datetime64_dtype(dtype): # We also need to check for the case where the user passed a # tz-naive dtype (i.e. datetime64[ns]) if tz is not None and not timezones.tz_compare(tz, dtz): raise ValueError("cannot supply both a tz and a " "timezone-naive dtype (i.e. datetime64[ns])") return tz
<SYSTEM_TASK:> If a timezone is not explicitly given via `tz`, see if one can <END_TASK> <USER_TASK:> Description: def _infer_tz_from_endpoints(start, end, tz): """ If a timezone is not explicitly given via `tz`, see if one can be inferred from the `start` and `end` endpoints. If more than one of these inputs provides a timezone, require that they all agree. Parameters ---------- start : Timestamp end : Timestamp tz : tzinfo or None Returns ------- tz : tzinfo or None Raises ------ TypeError : if start and end timezones do not agree """
try: inferred_tz = timezones.infer_tzinfo(start, end) except Exception: raise TypeError('Start and end cannot both be tz-aware with ' 'different timezones') inferred_tz = timezones.maybe_get_tz(inferred_tz) tz = timezones.maybe_get_tz(tz) if tz is not None and inferred_tz is not None: if not timezones.tz_compare(inferred_tz, tz): raise AssertionError("Inferred time zone not equal to passed " "time zone") elif inferred_tz is not None: tz = inferred_tz return tz
<SYSTEM_TASK:> Localize a start or end Timestamp to the timezone of the corresponding <END_TASK> <USER_TASK:> Description: def _maybe_localize_point(ts, is_none, is_not_none, freq, tz): """ Localize a start or end Timestamp to the timezone of the corresponding start or end Timestamp Parameters ---------- ts : start or end Timestamp to potentially localize is_none : argument that should be None is_not_none : argument that should not be None freq : Tick, DateOffset, or None tz : str, timezone object or None Returns ------- ts : Timestamp """
# Make sure start and end are timezone localized if: # 1) freq = a Timedelta-like frequency (Tick) # 2) freq = None i.e. generating a linspaced range if isinstance(freq, Tick) or freq is None: localize_args = {'tz': tz, 'ambiguous': False} else: localize_args = {'tz': None} if is_none is None and is_not_none is not None: ts = ts.tz_localize(**localize_args) return ts
<SYSTEM_TASK:> Add a timedelta-like, Tick, or TimedeltaIndex-like object <END_TASK> <USER_TASK:> Description: def _add_delta(self, delta): """ Add a timedelta-like, Tick, or TimedeltaIndex-like object to self, yielding a new DatetimeArray Parameters ---------- other : {timedelta, np.timedelta64, Tick, TimedeltaIndex, ndarray[timedelta64]} Returns ------- result : DatetimeArray """
new_values = super()._add_delta(delta) return type(self)._from_sequence(new_values, tz=self.tz, freq='infer')
<SYSTEM_TASK:> Convert times to midnight. <END_TASK> <USER_TASK:> Description: def normalize(self): """ Convert times to midnight. The time component of the date-time is converted to midnight i.e. 00:00:00. This is useful in cases, when the time does not matter. Length is unaltered. The timezones are unaffected. This method is available on Series with datetime values under the ``.dt`` accessor, and directly on Datetime Array/Index. Returns ------- DatetimeArray, DatetimeIndex or Series The same type as the original data. Series will have the same name and index. DatetimeIndex will have the same name. See Also -------- floor : Floor the datetimes to the specified freq. ceil : Ceil the datetimes to the specified freq. round : Round the datetimes to the specified freq. Examples -------- >>> idx = pd.date_range(start='2014-08-01 10:00', freq='H', ... periods=3, tz='Asia/Calcutta') >>> idx DatetimeIndex(['2014-08-01 10:00:00+05:30', '2014-08-01 11:00:00+05:30', '2014-08-01 12:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq='H') >>> idx.normalize() DatetimeIndex(['2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30', '2014-08-01 00:00:00+05:30'], dtype='datetime64[ns, Asia/Calcutta]', freq=None) """
if self.tz is None or timezones.is_utc(self.tz): not_null = ~self.isna() DAY_NS = ccalendar.DAY_SECONDS * 1000000000 new_values = self.asi8.copy() adjustment = (new_values[not_null] % DAY_NS) new_values[not_null] = new_values[not_null] - adjustment else: new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) return type(self)._from_sequence(new_values, freq='infer').tz_localize(self.tz)
<SYSTEM_TASK:> Calculate TimedeltaArray of difference between index <END_TASK> <USER_TASK:> Description: def to_perioddelta(self, freq): """ Calculate TimedeltaArray of difference between index values and index converted to PeriodArray at specified freq. Used for vectorized offsets Parameters ---------- freq : Period frequency Returns ------- TimedeltaArray/Index """
# TODO: consider privatizing (discussion in GH#23113) from pandas.core.arrays.timedeltas import TimedeltaArray i8delta = self.asi8 - self.to_period(freq).to_timestamp().asi8 m8delta = i8delta.view('m8[ns]') return TimedeltaArray(m8delta)
<SYSTEM_TASK:> Return the month names of the DateTimeIndex with specified locale. <END_TASK> <USER_TASK:> Description: def month_name(self, locale=None): """ Return the month names of the DateTimeIndex with specified locale. .. versionadded:: 0.23.0 Parameters ---------- locale : str, optional Locale determining the language in which to return the month name. Default is English locale. Returns ------- Index Index of month names. Examples -------- >>> idx = pd.date_range(start='2018-01', freq='M', periods=3) >>> idx DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31'], dtype='datetime64[ns]', freq='M') >>> idx.month_name() Index(['January', 'February', 'March'], dtype='object') """
if self.tz is not None and not timezones.is_utc(self.tz): values = self._local_timestamps() else: values = self.asi8 result = fields.get_date_name_field(values, 'month_name', locale=locale) result = self._maybe_mask_results(result, fill_value=None) return result
<SYSTEM_TASK:> Returns numpy array of datetime.time. The time part of the Timestamps. <END_TASK> <USER_TASK:> Description: def time(self): """ Returns numpy array of datetime.time. The time part of the Timestamps. """
# If the Timestamps have a timezone that is not UTC, # convert them into their i8 representation while # keeping their timezone and not using UTC if self.tz is not None and not timezones.is_utc(self.tz): timestamps = self._local_timestamps() else: timestamps = self.asi8 return tslib.ints_to_pydatetime(timestamps, box="time")
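A quick example of the time accessor on a DatetimeIndex (hypothetical timestamps):
>>> import pandas as pd
>>> idx = pd.date_range('2019-01-01 10:30', periods=2, freq='H')
>>> idx.time
array([datetime.time(10, 30), datetime.time(11, 30)], dtype=object)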
<SYSTEM_TASK:> Yield information about all public API items. <END_TASK> <USER_TASK:> Description: def get_api_items(api_doc_fd): """ Yield information about all public API items. Parse api.rst file from the documentation, and extract all the functions, methods, classes, attributes... This should include all pandas public API. Parameters ---------- api_doc_fd : file descriptor A file descriptor of the API documentation page, containing the table of contents with all the public API. Yields ------ name : str The name of the object (e.g. 'pandas.Series.str.upper). func : function The object itself. In most cases this will be a function or method, but it can also be classes, properties, cython objects... section : str The name of the section in the API page where the object item is located. subsection : str The name of the subsection in the API page where the object item is located. """
current_module = 'pandas' previous_line = current_section = current_subsection = '' position = None for line in api_doc_fd: line = line.strip() if len(line) == len(previous_line): if set(line) == set('-'): current_section = previous_line continue if set(line) == set('~'): current_subsection = previous_line continue if line.startswith('.. currentmodule::'): current_module = line.replace('.. currentmodule::', '').strip() continue if line == '.. autosummary::': position = 'autosummary' continue if position == 'autosummary': if line == '': position = 'items' continue if position == 'items': if line == '': position = None continue item = line.strip() func = importlib.import_module(current_module) for part in item.split('.'): func = getattr(func, part) yield ('.'.join([current_module, item]), func, current_section, current_subsection) previous_line = line
<SYSTEM_TASK:> Validate the docstring for the given func_name <END_TASK> <USER_TASK:> Description: def validate_one(func_name): """ Validate the docstring for the given func_name Parameters ---------- func_name : function Function whose docstring will be evaluated (e.g. pandas.read_csv). Returns ------- dict A dictionary containing all the information obtained from validating the docstring. """
doc = Docstring(func_name) errs, wrns, examples_errs = get_validation_data(doc) return {'type': doc.type, 'docstring': doc.clean_doc, 'deprecated': doc.deprecated, 'file': doc.source_file_name, 'file_line': doc.source_file_def_line, 'github_link': doc.github_url, 'errors': errs, 'warnings': wrns, 'examples_errors': examples_errs}
<SYSTEM_TASK:> Execute the validation of all docstrings, and return a dict with the <END_TASK> <USER_TASK:> Description: def validate_all(prefix, ignore_deprecated=False): """ Execute the validation of all docstrings, and return a dict with the results. Parameters ---------- prefix : str or None If provided, only the docstrings that start with this pattern will be validated. If None, all docstrings will be validated. ignore_deprecated: bool, default False If True, deprecated objects are ignored when validating docstrings. Returns ------- dict A dictionary with an item for every function/method... containing all the validation information. """
result = {} seen = {} # functions from the API docs api_doc_fnames = os.path.join( BASE_PATH, 'doc', 'source', 'reference', '*.rst') api_items = [] for api_doc_fname in glob.glob(api_doc_fnames): with open(api_doc_fname) as f: api_items += list(get_api_items(f)) for func_name, func_obj, section, subsection in api_items: if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info shared_code_key = doc_info['file'], doc_info['file_line'] shared_code = seen.get(shared_code_key, '') result[func_name].update({'in_api': True, 'section': section, 'subsection': subsection, 'shared_code_with': shared_code}) seen[shared_code_key] = func_name # functions from introspecting Series, DataFrame and Panel api_item_names = set(list(zip(*api_items))[0]) for class_ in (pandas.Series, pandas.DataFrame, pandas.Panel): for member in inspect.getmembers(class_): func_name = 'pandas.{}.{}'.format(class_.__name__, member[0]) if (not member[0].startswith('_') and func_name not in api_item_names): if prefix and not func_name.startswith(prefix): continue doc_info = validate_one(func_name) if ignore_deprecated and doc_info['deprecated']: continue result[func_name] = doc_info result[func_name]['in_api'] = False return result
<SYSTEM_TASK:> Import Python object from its name as string. <END_TASK> <USER_TASK:> Description: def _load_obj(name): """ Import Python object from its name as string. Parameters ---------- name : str Object name to import (e.g. pandas.Series.str.upper) Returns ------- object Python object that can be a class, method, function... Examples -------- >>> Docstring._load_obj('pandas.Series') <class 'pandas.core.series.Series'> """
for maxsplit in range(1, name.count('.') + 1): # TODO when py3 only replace by: module, *func_parts = ... func_name_split = name.rsplit('.', maxsplit) module = func_name_split[0] func_parts = func_name_split[1:] try: obj = importlib.import_module(module) except ImportError: pass else: continue if 'obj' not in locals(): raise ImportError('No module can be imported ' 'from "{}"'.format(name)) for part in func_parts: obj = getattr(obj, part) return obj
<SYSTEM_TASK:> Find the Python object that contains the source code of the object. <END_TASK> <USER_TASK:> Description: def _to_original_callable(obj): """ Find the Python object that contains the source code of the object. This is useful to find the place in the source code (file and line number) where a docstring is defined. It does not currently work for all cases, but it should help find some (properties...). """
while True: if inspect.isfunction(obj) or inspect.isclass(obj): f = inspect.getfile(obj) if f.startswith('<') and f.endswith('>'): return None return obj if inspect.ismethod(obj): obj = obj.__func__ elif isinstance(obj, functools.partial): obj = obj.func elif isinstance(obj, property): obj = obj.fget else: return None
<SYSTEM_TASK:> Convert numpy types to Python types for the Excel writers. <END_TASK> <USER_TASK:> Description: def _value_with_fmt(self, val): """Convert numpy types to Python types for the Excel writers. Parameters ---------- val : object Value to be written into cells Returns ------- Tuple with the first element being the converted value and the second being an optional format """
fmt = None if is_integer(val): val = int(val) elif is_float(val): val = float(val) elif is_bool(val): val = bool(val) elif isinstance(val, datetime): fmt = self.datetime_format elif isinstance(val, date): fmt = self.date_format elif isinstance(val, timedelta): val = val.total_seconds() / float(86400) fmt = '0' else: val = compat.to_str(val) return val, fmt
<SYSTEM_TASK:> checks the path's extension against the Writer's supported <END_TASK> <USER_TASK:> Description: def check_extension(cls, ext): """checks the path's extension against the Writer's supported extensions. If it isn't supported, raises ValueError."""
if ext.startswith('.'): ext = ext[1:] if not any(ext in extension for extension in cls.supported_extensions): msg = ("Invalid extension for engine '{engine}': '{ext}'" .format(engine=pprint_thing(cls.engine), ext=pprint_thing(ext))) raise ValueError(msg) else: return True
<SYSTEM_TASK:> Validate that the where statement is of the right type. <END_TASK> <USER_TASK:> Description: def _validate_where(w): """ Validate that the where statement is of the right type. The type may either be String, Expr, or list-like of Exprs. Parameters ---------- w : String term expression, Expr, or list-like of Exprs. Returns ------- where : The original where clause if the check was successful. Raises ------ TypeError : An invalid data type was passed in for w (e.g. dict). """
if not (isinstance(w, (Expr, str)) or is_list_like(w)): raise TypeError("where must be passed as a string, Expr, " "or list-like of Exprs") return w
<SYSTEM_TASK:> loose checking if s is a pytables-acceptable expression <END_TASK> <USER_TASK:> Description: def maybe_expression(s): """ loose checking if s is a pytables-acceptable expression """
if not isinstance(s, str): return False ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',) # make sure we have an op at least return any(op in s for op in ops)
<SYSTEM_TASK:> create and return the op string for this TermValue <END_TASK> <USER_TASK:> Description: def generate(self, v): """ create and return the op string for this TermValue """
val = v.tostring(self.encoding) return "({lhs} {op} {val})".format(lhs=self.lhs, op=self.op, val=val)
<SYSTEM_TASK:> convert the expression that is in the term to something that is <END_TASK> <USER_TASK:> Description: def convert_value(self, v): """ convert the expression that is in the term to something that is accepted by pytables """
def stringify(value): if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) kind = _ensure_decoded(self.kind) meta = _ensure_decoded(self.meta) if kind == 'datetime64' or kind == 'datetime': if isinstance(v, (int, float)): v = stringify(v) v = _ensure_decoded(v) v = Timestamp(v) if v.tz is not None: v = v.tz_convert('UTC') return TermValue(v, v.value, kind) elif kind == 'timedelta64' or kind == 'timedelta': v = Timedelta(v, unit='s').value return TermValue(int(v), v, kind) elif meta == 'category': metadata = com.values_from_object(self.metadata) result = metadata.searchsorted(v, side='left') # result returns 0 if v is first element or if v is not in metadata # check that metadata contains v if not result and v not in metadata: result = -1 return TermValue(result, result, 'integer') elif kind == 'integer': v = int(float(v)) return TermValue(v, v, kind) elif kind == 'float': v = float(v) return TermValue(v, v, kind) elif kind == 'bool': if isinstance(v, str): v = not v.strip().lower() in ['false', 'f', 'no', 'n', 'none', '0', '[]', '{}', ''] else: v = bool(v) return TermValue(v, v, kind) elif isinstance(v, str): # string quoting return TermValue(v, stringify(v), 'string') else: raise TypeError("Cannot compare {v} of type {typ} to {kind} column" .format(v=v, typ=type(v), kind=kind))
<SYSTEM_TASK:> create and return the numexpr condition and filter <END_TASK> <USER_TASK:> Description: def evaluate(self): """ create and return the numexpr condition and filter """
try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid condition".format(expr=self.expr, slf=self)) try: self.filter = self.terms.prune(FilterBinOp) except AttributeError: raise ValueError("cannot process expression [{expr}], [{slf}] " "is not a valid filter".format(expr=self.expr, slf=self)) return self.condition, self.filter
<SYSTEM_TASK:> quote the string if not encoded <END_TASK> <USER_TASK:> Description: def tostring(self, encoding): """ quote the string if not encoded else encode and return """
if self.kind == 'string': if encoding is not None: return self.converted return '"{converted}"'.format(converted=self.converted) elif self.kind == 'float': # python 2 str(float) is not always # round-trippable so use repr() return repr(self.converted) return self.converted
<SYSTEM_TASK:> If 'Series.argmin' is called via the 'numpy' library, <END_TASK> <USER_TASK:> Description: def validate_argmin_with_skipna(skipna, args, kwargs): """ If 'Series.argmin' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean """
skipna, args = process_skipna(skipna, args) validate_argmin(args, kwargs) return skipna
<SYSTEM_TASK:> If 'Series.argmax' is called via the 'numpy' library, <END_TASK> <USER_TASK:> Description: def validate_argmax_with_skipna(skipna, args, kwargs): """ If 'Series.argmax' is called via the 'numpy' library, the third parameter in its signature is 'out', which takes either an ndarray or 'None', so check if the 'skipna' parameter is either an instance of ndarray or is None, since 'skipna' itself should be a boolean """
skipna, args = process_skipna(skipna, args) validate_argmax(args, kwargs) return skipna
<SYSTEM_TASK:> If 'Categorical.argsort' is called via the 'numpy' library, the <END_TASK> <USER_TASK:> Description: def validate_argsort_with_ascending(ascending, args, kwargs): """ If 'Categorical.argsort' is called via the 'numpy' library, the first parameter in its signature is 'axis', which takes either an integer or 'None', so check if the 'ascending' parameter has either integer type or is None, since 'ascending' itself should be a boolean """
if is_integer(ascending) or ascending is None: args = (ascending,) + args ascending = True validate_argsort_kind(args, kwargs, max_fname_arg_count=3) return ascending
<SYSTEM_TASK:> If 'NDFrame.clip' is called via the numpy library, the third <END_TASK> <USER_TASK:> Description: def validate_clip_with_axis(axis, args, kwargs): """ If 'NDFrame.clip' is called via the numpy library, the third parameter in its signature is 'out', which can takes an ndarray, so check if the 'axis' parameter is an instance of ndarray, since 'axis' itself should either be an integer or None """
if isinstance(axis, ndarray): args = (axis,) + args axis = None validate_clip(args, kwargs) return axis