text_prompt: string (100 to 17.7k characters)
code_prompt: string (7 to 9.86k characters)
<SYSTEM_TASK:> If this function is called via the 'numpy' library, the third <END_TASK> <USER_TASK:> Description: def validate_cum_func_with_skipna(skipna, args, kwargs, name): """ If this function is called via the 'numpy' library, the third parameter in its signature is 'dtype', which takes either a 'numpy' dtype or 'None', so check if the 'skipna' parameter is a boolean or not """
if not is_bool(skipna): args = (skipna,) + args skipna = True validate_cum_func(args, kwargs, fname=name) return skipna
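A brief usage sketch (a hypothetical call, assuming the validator above and the internal validate_cum_func are in scope): when numpy dispatches e.g. np.cumsum(obj, axis, dtype), the positional dtype value lands in the skipna slot, and this shim pushes it back into args.
# np.cumsum(series, 0, None) would forward dtype=None into the 'skipna' slot;
# None is not a bool, so it is re-routed into args and validated as the numpy
# 'dtype' argument, while skipna falls back to True.
skipna = validate_cum_func_with_skipna(None, (), {}, 'cumsum')  # -> True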
<SYSTEM_TASK:> If this function is called via the 'numpy' library, the third <END_TASK> <USER_TASK:> Description: def validate_take_with_convert(convert, args, kwargs): """ If this function is called via the 'numpy' library, the third parameter in its signature is 'axis', which takes either an ndarray or 'None', so check if the 'convert' parameter is either an instance of ndarray or is None """
if isinstance(convert, ndarray) or convert is None: args = (convert,) + args convert = True validate_take(args, kwargs, max_fname_arg_count=3, method='both') return convert
<SYSTEM_TASK:> 'args' and 'kwargs' should be empty, except for allowed <END_TASK> <USER_TASK:> Description: def validate_groupby_func(name, args, kwargs, allowed=None): """ 'args' and 'kwargs' should be empty, except for allowed kwargs because all of their necessary parameters are explicitly listed in the function signature """
if allowed is None: allowed = [] kwargs = set(kwargs) - set(allowed) if len(args) + len(kwargs) > 0: raise UnsupportedFunctionCall(( "numpy operations are not valid " "with groupby. Use .groupby(...)." "{func}() instead".format(func=name)))
<SYSTEM_TASK:> 'args' and 'kwargs' should be empty because all of <END_TASK> <USER_TASK:> Description: def validate_resampler_func(method, args, kwargs): """ 'args' and 'kwargs' should be empty because all of their necessary parameters are explicitly listed in the function signature """
if len(args) + len(kwargs) > 0: if method in RESAMPLER_NUMPY_OPS: raise UnsupportedFunctionCall(( "numpy operations are not valid " "with resample. Use .resample(...)." "{func}() instead".format(func=method))) else: raise TypeError("too many arguments passed in")
<SYSTEM_TASK:> Ensure that the axis argument passed to min, max, argmin, or argmax is <END_TASK> <USER_TASK:> Description: def validate_minmax_axis(axis): """ Ensure that the axis argument passed to min, max, argmin, or argmax is zero or None, as otherwise it will be incorrectly ignored. Parameters ---------- axis : int or None Raises ------ ValueError """
ndim = 1 # hard-coded for Index if axis is None: return if axis >= ndim or (axis < 0 and ndim + axis < 0): raise ValueError("`axis` must be fewer than the number of " "dimensions ({ndim})".format(ndim=ndim))
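Concretely (assuming the function above is in scope), only axis values of 0, -1 or None pass the hard-coded ndim of 1:
validate_minmax_axis(None)  # no-op: axis was not specified
validate_minmax_axis(0)     # ok: 0 < ndim
validate_minmax_axis(-1)    # ok: ndim + (-1) == 0, so not out of range
validate_minmax_axis(1)     # raises ValueError: `axis` must be fewer than the number of dimensions (1)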
<SYSTEM_TASK:> Load msgpack pandas object from the specified <END_TASK> <USER_TASK:> Description: def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs): """ Load msgpack pandas object from the specified file path THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. Parameters ---------- path_or_buf : string File path, BytesIO like or string encoding : Encoding for decoding msgpack str type iterator : boolean, if True, return an iterator to the unpacker (default is False) Returns ------- obj : same type as object stored in file """
path_or_buf, _, _, should_close = get_filepath_or_buffer(path_or_buf) if iterator: return Iterator(path_or_buf) def read(fh): unpacked_obj = list(unpack(fh, encoding=encoding, **kwargs)) if len(unpacked_obj) == 1: return unpacked_obj[0] if should_close: try: path_or_buf.close() except IOError: pass return unpacked_obj # see if we have an actual file if isinstance(path_or_buf, str): try: exists = os.path.exists(path_or_buf) except (TypeError, ValueError): exists = False if exists: with open(path_or_buf, 'rb') as fh: return read(fh) if isinstance(path_or_buf, bytes): # treat as a binary-like fh = None try: fh = BytesIO(path_or_buf) return read(fh) finally: if fh is not None: fh.close() elif hasattr(path_or_buf, 'read') and callable(path_or_buf.read): # treat as a buffer like return read(path_or_buf) raise ValueError('path_or_buf needs to be a string file path or file-like')
<SYSTEM_TASK:> return my dtype mapping, whether number or name <END_TASK> <USER_TASK:> Description: def dtype_for(t): """ return my dtype mapping, whether number or name """
if t in dtype_dict: return dtype_dict[t] return np.typeDict.get(t, t)
<SYSTEM_TASK:> Convert strings to complex number instance with specified numpy type. <END_TASK> <USER_TASK:> Description: def c2f(r, i, ctype_name): """ Convert strings to complex number instance with specified numpy type. """
ftype = c2f_dict[ctype_name] return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
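For example (assuming c2f_dict maps 'complex128' to np.float64, which is what the real/imaginary conversion implies):
import numpy as np
c2f('1.5', '-2.0', 'complex128')
# np.float64('1.5') + 1j * np.float64('-2.0') -> np.complex128(1.5-2j)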
<SYSTEM_TASK:> convert the numpy values to a list <END_TASK> <USER_TASK:> Description: def convert(values): """ convert the numpy values to a list """
dtype = values.dtype if is_categorical_dtype(values): return values elif is_object_dtype(dtype): return values.ravel().tolist() if needs_i8_conversion(dtype): values = values.view('i8') v = values.ravel() if compressor == 'zlib': _check_zlib() # return string arrays like they are if dtype == np.object_: return v.tolist() # convert to a bytes array v = v.tostring() return ExtType(0, zlib.compress(v)) elif compressor == 'blosc': _check_blosc() # return string arrays like they are if dtype == np.object_: return v.tolist() # convert to a bytes array v = v.tostring() return ExtType(0, blosc.compress(v, typesize=dtype.itemsize)) # ndarray (on original dtype) return ExtType(0, v.tostring())
<SYSTEM_TASK:> Pack an object and return the packed bytes. <END_TASK> <USER_TASK:> Description: def pack(o, default=encode, encoding='utf-8', unicode_errors='strict', use_single_float=False, autoreset=1, use_bin_type=1): """ Pack an object and return the packed bytes. """
return Packer(default=default, encoding=encoding, unicode_errors=unicode_errors, use_single_float=use_single_float, autoreset=autoreset, use_bin_type=use_bin_type).pack(o)
<SYSTEM_TASK:> Convert a JSON string to pandas object. <END_TASK> <USER_TASK:> Description: def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, convert_axes=None, convert_dates=True, keep_default_dates=True, numpy=False, precise_float=False, date_unit=None, encoding=None, lines=False, chunksize=None, compression='infer'): """ Convert a JSON string to pandas object. Parameters ---------- path_or_buf : a valid JSON string or file-like, default: None The string could be a URL. Valid URL schemes include http, ftp, s3, gcs, and file. For file URLs, a host is expected. For instance, a local file could be ``file://localhost/path/to/table.json`` orient : string, Indication of expected JSON string format. Compatible JSON strings can be produced by ``to_json()`` with a corresponding orient value. The set of possible orients is: - ``'split'`` : dict like ``{index -> [index], columns -> [columns], data -> [values]}`` - ``'records'`` : list like ``[{column -> value}, ... , {column -> value}]`` - ``'index'`` : dict like ``{index -> {column -> value}}`` - ``'columns'`` : dict like ``{column -> {index -> value}}`` - ``'values'`` : just the values array The allowed and default values depend on the value of the `typ` parameter. * when ``typ == 'series'``, - allowed orients are ``{'split','records','index'}`` - default is ``'index'`` - The Series index must be unique for orient ``'index'``. * when ``typ == 'frame'``, - allowed orients are ``{'split','records','index', 'columns','values', 'table'}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. .. versionadded:: 0.23.0 'table' as an allowed value for the ``orient`` argument typ : type of object to recover (series or frame), default 'frame' dtype : boolean or dict, default None If True, infer dtypes; if a dict of column to dtype, then use those; if False, then don't infer dtypes at all, applies only to the data. For all ``orient`` values except ``'table'``, default is True. .. versionchanged:: 0.25.0 Not applicable for ``orient='table'``. convert_axes : boolean, default None Try to convert the axes to the proper dtypes. For all ``orient`` values except ``'table'``, default is True. .. versionchanged:: 0.25.0 Not applicable for ``orient='table'``. convert_dates : boolean, default True List of columns to parse for dates; If True, then try to parse datelike columns default is True; a column label is datelike if * it ends with ``'_at'``, * it ends with ``'_time'``, * it begins with ``'timestamp'``, * it is ``'modified'``, or * it is ``'date'`` keep_default_dates : boolean, default True If parsing dates, then parse the default datelike columns numpy : boolean, default False Direct decoding to numpy arrays. Supports numeric data only, but non-numeric column and index labels are supported. Note also that the JSON ordering MUST be the same for each term if numpy=True. precise_float : boolean, default False Set to enable usage of higher precision (strtod) function when decoding string to double values. Default (False) is to use fast but less precise builtin functionality date_unit : string, default None The timestamp unit to detect if converting dates. The default behaviour is to try and detect the correct precision, but if this is not desired then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds, milliseconds, microseconds or nanoseconds respectively. 
encoding : str, default is 'utf-8' The encoding to use to decode py3 bytes. .. versionadded:: 0.19.0 lines : boolean, default False Read the file as a json object per line. .. versionadded:: 0.19.0 chunksize : integer, default None Return JsonReader object for iteration. See the `line-delimted json docs <http://pandas.pydata.org/pandas-docs/stable/io.html#io-jsonl>`_ for more information on ``chunksize``. This can only be passed if `lines=True`. If this is None, the file will be read into memory all at once. .. versionadded:: 0.21.0 compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' For on-the-fly decompression of on-disk data. If 'infer', then use gzip, bz2, zip or xz if path_or_buf is a string ending in '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression otherwise. If using 'zip', the ZIP file must contain only one data file to be read in. Set to None for no decompression. .. versionadded:: 0.21.0 Returns ------- result : Series or DataFrame, depending on the value of `typ`. See Also -------- DataFrame.to_json Notes ----- Specific to ``orient='table'``, if a :class:`DataFrame` with a literal :class:`Index` name of `index` gets written with :func:`to_json`, the subsequent read operation will incorrectly set the :class:`Index` name to ``None``. This is because `index` is also used by :func:`DataFrame.to_json` to denote a missing :class:`Index` name, and the subsequent :func:`read_json` operation cannot distinguish between the two. The same limitation is encountered with a :class:`MultiIndex` and any names beginning with ``'level_'``. Examples -------- >>> df = pd.DataFrame([['a', 'b'], ['c', 'd']], ... index=['row 1', 'row 2'], ... columns=['col 1', 'col 2']) Encoding/decoding a Dataframe using ``'split'`` formatted JSON: >>> df.to_json(orient='split') '{"columns":["col 1","col 2"], "index":["row 1","row 2"], "data":[["a","b"],["c","d"]]}' >>> pd.read_json(_, orient='split') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'index'`` formatted JSON: >>> df.to_json(orient='index') '{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}' >>> pd.read_json(_, orient='index') col 1 col 2 row 1 a b row 2 c d Encoding/decoding a Dataframe using ``'records'`` formatted JSON. Note that index labels are not preserved with this encoding. >>> df.to_json(orient='records') '[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]' >>> pd.read_json(_, orient='records') col 1 col 2 0 a b 1 c d Encoding with Table Schema >>> df.to_json(orient='table') '{"schema": {"fields": [{"name": "index", "type": "string"}, {"name": "col 1", "type": "string"}, {"name": "col 2", "type": "string"}], "primaryKey": "index", "pandas_version": "0.20.0"}, "data": [{"index": "row 1", "col 1": "a", "col 2": "b"}, {"index": "row 2", "col 1": "c", "col 2": "d"}]}' """
if orient == 'table' and dtype: raise ValueError("cannot pass both dtype and orient='table'") if orient == 'table' and convert_axes: raise ValueError("cannot pass both convert_axes and orient='table'") if dtype is None and orient != 'table': dtype = True if convert_axes is None and orient != 'table': convert_axes = True compression = _infer_compression(path_or_buf, compression) filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer( path_or_buf, encoding=encoding, compression=compression, ) json_reader = JsonReader( filepath_or_buffer, orient=orient, typ=typ, dtype=dtype, convert_axes=convert_axes, convert_dates=convert_dates, keep_default_dates=keep_default_dates, numpy=numpy, precise_float=precise_float, date_unit=date_unit, encoding=encoding, lines=lines, chunksize=chunksize, compression=compression, ) if chunksize: return json_reader result = json_reader.read() if should_close: try: filepath_or_buffer.close() except: # noqa: flake8 pass return result
<SYSTEM_TASK:> Try to format axes if they are datelike. <END_TASK> <USER_TASK:> Description: def _format_axes(self): """ Try to format axes if they are datelike. """
if not self.obj.index.is_unique and self.orient in ( 'index', 'columns'): raise ValueError("DataFrame index must be unique for orient=" "'{orient}'.".format(orient=self.orient)) if not self.obj.columns.is_unique and self.orient in ( 'index', 'columns', 'records'): raise ValueError("DataFrame columns must be unique for orient=" "'{orient}'.".format(orient=self.orient))
<SYSTEM_TASK:> Read the whole JSON input into a pandas object. <END_TASK> <USER_TASK:> Description: def read(self): """ Read the whole JSON input into a pandas object. """
if self.lines and self.chunksize: obj = concat(self) elif self.lines: data = to_str(self.data) obj = self._get_object_parser( self._combine_lines(data.split('\n')) ) else: obj = self._get_object_parser(self.data) self.close() return obj
<SYSTEM_TASK:> Checks that dict has only the appropriate keys for orient='split'. <END_TASK> <USER_TASK:> Description: def check_keys_split(self, decoded): """ Checks that dict has only the appropriate keys for orient='split'. """
bad_keys = set(decoded.keys()).difference(set(self._split_keys)) if bad_keys: bad_keys = ", ".join(bad_keys) raise ValueError("JSON data had unexpected key(s): {bad_keys}" .format(bad_keys=pprint_thing(bad_keys)))
<SYSTEM_TASK:> Try to convert axes. <END_TASK> <USER_TASK:> Description: def _convert_axes(self): """ Try to convert axes. """
for axis in self.obj._AXIS_NUMBERS.keys(): new_axis, result = self._try_convert_data( axis, self.obj._get_axis(axis), use_dtypes=False, convert_dates=True) if result: setattr(self.obj, axis, new_axis)
<SYSTEM_TASK:> Take a conversion function and possibly recreate the frame. <END_TASK> <USER_TASK:> Description: def _process_converter(self, f, filt=None): """ Take a conversion function and possibly recreate the frame. """
if filt is None: filt = lambda col, c: True needs_new_obj = False new_obj = dict() for i, (col, c) in enumerate(self.obj.iteritems()): if filt(col, c): new_data, result = f(col, c) if result: c = new_data needs_new_obj = True new_obj[i] = c if needs_new_obj: # possibly handle dup columns new_obj = DataFrame(new_obj, index=self.obj.index) new_obj.columns = self.obj.columns self.obj = new_obj
<SYSTEM_TASK:> Format an array for printing. <END_TASK> <USER_TASK:> Description: def format_array(values, formatter, float_format=None, na_rep='NaN', digits=None, space=None, justify='right', decimal='.', leading_space=None): """ Format an array for printing. Parameters ---------- values formatter float_format na_rep digits space justify decimal leading_space : bool, optional Whether the array should be formatted with a leading space. When the array is a column of a Series or DataFrame, we do want the leading space to pad between columns. When formatting an Index subclass (e.g. IntervalIndex._format_native_types), we don't want the leading space since it should be left-aligned. Returns ------- List[str] """
if is_datetime64_dtype(values.dtype): fmt_klass = Datetime64Formatter elif is_datetime64tz_dtype(values): fmt_klass = Datetime64TZFormatter elif is_timedelta64_dtype(values.dtype): fmt_klass = Timedelta64Formatter elif is_extension_array_dtype(values.dtype): fmt_klass = ExtensionArrayFormatter elif is_float_dtype(values.dtype) or is_complex_dtype(values.dtype): fmt_klass = FloatArrayFormatter elif is_integer_dtype(values.dtype): fmt_klass = IntArrayFormatter else: fmt_klass = GenericArrayFormatter if space is None: space = get_option("display.column_space") if float_format is None: float_format = get_option("display.float_format") if digits is None: digits = get_option("display.precision") fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep, float_format=float_format, formatter=formatter, space=space, justify=justify, decimal=decimal, leading_space=leading_space) return fmt_obj.get_result()
<SYSTEM_TASK:> Outputs rounded and formatted percentiles. <END_TASK> <USER_TASK:> Description: def format_percentiles(percentiles): """ Outputs rounded and formatted percentiles. Parameters ---------- percentiles : list-like, containing floats from interval [0,1] Returns ------- formatted : list of strings Notes ----- Rounding precision is chosen so that: (1) if any two elements of ``percentiles`` differ, they remain different after rounding (2) no entry is *rounded* to 0% or 100%. Any non-integer is always rounded to at least 1 decimal place. Examples -------- Keeps all entries different after rounding: >>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999]) ['1.999%', '2.001%', '50%', '66.667%', '99.99%'] No element is rounded to 0% or 100% (unless already equal to it). Duplicates are allowed: >>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999]) ['0%', '50%', '2.0%', '50%', '66.67%', '99.99%'] """
percentiles = np.asarray(percentiles) # It checks for np.NaN as well with np.errstate(invalid='ignore'): if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \ or not np.all(percentiles <= 1): raise ValueError("percentiles should all be in the interval [0,1]") percentiles = 100 * percentiles int_idx = (percentiles.astype(int) == percentiles) if np.all(int_idx): out = percentiles.astype(int).astype(str) return [i + '%' for i in out] unique_pcts = np.unique(percentiles) to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None # Least precision that keeps percentiles unique after rounding prec = -np.floor(np.log10(np.min( np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end) ))).astype(int) prec = max(1, prec) out = np.empty_like(percentiles, dtype=object) out[int_idx] = percentiles[int_idx].astype(int).astype(str) out[~int_idx] = percentiles[~int_idx].round(prec).astype(str) return [i + '%' for i in out]
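The precision choice can be reproduced by hand; a minimal numpy sketch of the precision step for the first docstring example (assuming the helper above is in scope):
import numpy as np
pcts = np.array([1.999, 2.001, 50.0, 66.6666, 99.99])   # percentiles already scaled to percent
diffs = np.ediff1d(np.unique(pcts), to_begin=pcts[0], to_end=100 - pcts[-1])
prec = max(1, -int(np.floor(np.log10(diffs.min()))))    # -> 3, hence '1.999%' and '2.001%'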
<SYSTEM_TASK:> Return a formatter function for a range of timedeltas. <END_TASK> <USER_TASK:> Description: def _get_format_timedelta64(values, nat_rep='NaT', box=False): """ Return a formatter function for a range of timedeltas. These will all have the same format argument If box, then show the return in quotes """
values_int = values.astype(np.int64) consider_values = values_int != iNaT one_day_nanos = (86400 * 1e9) even_days = np.logical_and(consider_values, values_int % one_day_nanos != 0).sum() == 0 all_sub_day = np.logical_and( consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0 if even_days: format = None elif all_sub_day: format = 'sub_day' else: format = 'long' def _formatter(x): if x is None or (is_scalar(x) and isna(x)): return nat_rep if not isinstance(x, Timedelta): x = Timedelta(x) result = x._repr_base(format=format) if box: result = "'{res}'".format(res=result) return result return _formatter
<SYSTEM_TASK:> Separates the real and imaginary parts from the complex number, and <END_TASK> <USER_TASK:> Description: def _trim_zeros_complex(str_complexes, na_rep='NaN'): """ Separates the real and imaginary parts from the complex number, and executes the _trim_zeros_float method on each of those. """
def separate_and_trim(str_complex, na_rep): num_arr = str_complex.split('+') return (_trim_zeros_float([num_arr[0]], na_rep) + ['+'] + _trim_zeros_float([num_arr[1][:-1]], na_rep) + ['j']) return [''.join(separate_and_trim(x, na_rep)) for x in str_complexes]
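For instance (assuming both trimming helpers are in scope; the split on '+' presumes a positive imaginary part):
_trim_zeros_complex(['1.000+2.500j', '0.500+0.000j'])
# -> ['1.0+2.5j', '0.5+0.0j']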
<SYSTEM_TASK:> Trims zeros, leaving just one before the decimal points if need be. <END_TASK> <USER_TASK:> Description: def _trim_zeros_float(str_floats, na_rep='NaN'): """ Trims zeros, leaving just one before the decimal points if need be. """
trimmed = str_floats def _is_number(x): return (x != na_rep and not x.endswith('inf')) def _cond(values): finite = [x for x in values if _is_number(x)] return (len(finite) > 0 and all(x.endswith('0') for x in finite) and not (any(('e' in x) or ('E' in x) for x in finite))) while _cond(trimmed): trimmed = [x[:-1] if _is_number(x) else x for x in trimmed] # leave one 0 after the decimal points if need be. return [x + "0" if x.endswith('.') and _is_number(x) else x for x in trimmed]
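A few illustrative calls (trimming only proceeds while every finite entry still ends in a zero):
_trim_zeros_float(['1.500', '2.000', 'NaN'])  # -> ['1.5', '2.0', 'NaN']  (the na_rep entry is left alone)
_trim_zeros_float(['1.50', '2.05'])           # unchanged: '2.05' does not end in '0'
_trim_zeros_float(['3.000'])                  # -> ['3.0']  (one zero is kept after the decimal point)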
<SYSTEM_TASK:> Alter default behavior on how float is formatted in DataFrame. <END_TASK> <USER_TASK:> Description: def set_eng_float_format(accuracy=3, use_eng_prefix=False): """ Alter default behavior on how float is formatted in DataFrame. Format float in engineering format. By accuracy, we mean the number of decimal digits after the floating point. See also EngFormatter. """
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix)) set_option("display.column_space", max(12, accuracy + 9))
<SYSTEM_TASK:> For each index in each level the function returns lengths of indexes. <END_TASK> <USER_TASK:> Description: def get_level_lengths(levels, sentinel=''): """For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values for each level. sentinel : string, optional Value which signals that no new index starts at that position. Returns ------- list of maps For each level, a map from the row position where an index label starts (key) to the number of rows that label spans (value). """
if len(levels) == 0: return [] control = [True] * len(levels[0]) result = [] for level in levels: last_index = 0 lengths = {} for i, key in enumerate(level): if control[i] and key == sentinel: pass else: control[i] = False lengths[last_index] = i - last_index last_index = i lengths[last_index] = len(level) - last_index result.append(lengths) return result
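A worked example, assuming the sentinel-style input that the HTML/LaTeX formatters produce (a repeated label is replaced by the empty-string sentinel):
levels = [['foo', '', 'bar', ''],         # outer level: 'foo' and 'bar' each span two rows
          ['one', 'two', 'one', 'two']]   # inner level: every label starts a new index
get_level_lengths(levels)
# -> [{0: 2, 2: 2}, {0: 1, 1: 1, 2: 1, 3: 1}]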
<SYSTEM_TASK:> Appends lines to a buffer. <END_TASK> <USER_TASK:> Description: def buffer_put_lines(buf, lines): """ Appends lines to a buffer. Parameters ---------- buf The buffer to write to lines The lines to append. """
if any(isinstance(x, str) for x in lines): lines = [str(x) for x in lines] buf.write('\n'.join(lines))
<SYSTEM_TASK:> Calculate display width considering unicode East Asian Width <END_TASK> <USER_TASK:> Description: def len(self, text): """ Calculate display width considering unicode East Asian Width """
if not isinstance(text, str): return len(text) return sum(self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text)
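For example, with a hypothetical instance adj of the adjustment class above, and assuming the default width map counts 'W'/'F' characters as two display cells:
adj.len('パンダ')    # -> 6: three wide (katakana) characters, two cells each
adj.len('pandas')   # -> 6: six narrow characters, one cell each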
<SYSTEM_TASK:> Returns a function to be applied on each value to format it <END_TASK> <USER_TASK:> Description: def _value_formatter(self, float_format=None, threshold=None): """Returns a function to be applied on each value to format it """
# the float_format parameter supersedes self.float_format if float_format is None: float_format = self.float_format # we are going to compose different functions, to first convert to # a string, then replace the decimal symbol, and finally chop according # to the threshold # when there is no float_format, we use str instead of '%g' # because str(0.0) = '0.0' while '%g' % 0.0 = '0' if float_format: def base_formatter(v): return float_format(value=v) if notna(v) else self.na_rep else: def base_formatter(v): return str(v) if notna(v) else self.na_rep if self.decimal != '.': def decimal_formatter(v): return base_formatter(v).replace('.', self.decimal, 1) else: decimal_formatter = base_formatter if threshold is None: return decimal_formatter def formatter(value): if notna(value): if abs(value) > threshold: return decimal_formatter(value) else: return decimal_formatter(0.0) else: return self.na_rep return formatter
<SYSTEM_TASK:> Returns the float values converted into strings using <END_TASK> <USER_TASK:> Description: def get_result_as_array(self): """ Returns the float values converted into strings using the parameters given at initialisation, as a numpy array """
if self.formatter is not None: return np.array([self.formatter(x) for x in self.values]) if self.fixed_width: threshold = get_option("display.chop_threshold") else: threshold = None # if we have a fixed_width, we'll need to try different float_format def format_values_with(float_format): formatter = self._value_formatter(float_format, threshold) # default formatter leaves a space to the left when formatting # floats, must be consistent for left-justifying NaNs (GH #25061) if self.justify == 'left': na_rep = ' ' + self.na_rep else: na_rep = self.na_rep # separate the wheat from the chaff values = self.values is_complex = is_complex_dtype(values) mask = isna(values) if hasattr(values, 'to_dense'): # sparse numpy ndarray values = values.to_dense() values = np.array(values, dtype='object') values[mask] = na_rep imask = (~mask).ravel() values.flat[imask] = np.array([formatter(val) for val in values.ravel()[imask]]) if self.fixed_width: if is_complex: return _trim_zeros_complex(values, na_rep) else: return _trim_zeros_float(values, na_rep) return values # There is a special default string when we are fixed-width # The default is otherwise to use str instead of a formatting string if self.float_format is None: if self.fixed_width: float_format = partial('{value: .{digits:d}f}'.format, digits=self.digits) else: float_format = self.float_format else: float_format = lambda value: self.float_format % value formatted_values = format_values_with(float_format) if not self.fixed_width: return formatted_values # we need do convert to engineering format if some values are too small # and would appear as 0, or if some values are too big and take too # much space if len(formatted_values) > 0: maxlen = max(len(x) for x in formatted_values) too_long = maxlen > self.digits + 6 else: too_long = False with np.errstate(invalid='ignore'): abs_vals = np.abs(self.values) # this is pretty arbitrary for now # large values: more that 8 characters including decimal symbol # and first digit, hence > 1e6 has_large_values = (abs_vals > 1e6).any() has_small_values = ((abs_vals < 10**(-self.digits)) & (abs_vals > 0)).any() if has_small_values or (too_long and has_large_values): float_format = partial('{value: .{digits:d}e}'.format, digits=self.digits) formatted_values = format_values_with(float_format) return formatted_values
<SYSTEM_TASK:> we by definition have a TZ <END_TASK> <USER_TASK:> Description: def _format_strings(self): """ we by definition have a TZ """
values = self.values.astype(object) is_dates_only = _is_dates_only(values) formatter = (self.formatter or _get_format_datetime64(is_dates_only, date_format=self.date_format)) fmt_values = [formatter(x) for x in values] return fmt_values
<SYSTEM_TASK:> Given an Interval or IntervalIndex, return the corresponding interval with <END_TASK> <USER_TASK:> Description: def _get_interval_closed_bounds(interval): """ Given an Interval or IntervalIndex, return the corresponding interval with closed bounds. """
left, right = interval.left, interval.right if interval.open_left: left = _get_next_label(left) if interval.open_right: right = _get_prev_label(right) return left, right
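For example with integer endpoints (assuming _get_next_label/_get_prev_label step integer labels by one):
import pandas as pd
iv = pd.Interval(0, 5, closed='right')   # open on the left side
_get_interval_closed_bounds(iv)          # -> (1, 5): the open left bound is nudged to the next label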
<SYSTEM_TASK:> Return a fixed frequency IntervalIndex <END_TASK> <USER_TASK:> Description: def interval_range(start=None, end=None, periods=None, freq=None, name=None, closed='right'): """ Return a fixed frequency IntervalIndex Parameters ---------- start : numeric or datetime-like, default None Left bound for generating intervals end : numeric or datetime-like, default None Right bound for generating intervals periods : integer, default None Number of periods to generate freq : numeric, string, or DateOffset, default None The length of each interval. Must be consistent with the type of start and end, e.g. 2 for numeric, or '5H' for datetime-like. Default is 1 for numeric and 'D' for datetime-like. name : string, default None Name of the resulting IntervalIndex closed : {'left', 'right', 'both', 'neither'}, default 'right' Whether the intervals are closed on the left-side, right-side, both or neither. Returns ------- rng : IntervalIndex See Also -------- IntervalIndex : An Index of intervals that are all closed on the same side. Notes ----- Of the four parameters ``start``, ``end``, ``periods``, and ``freq``, exactly three must be specified. If ``freq`` is omitted, the resulting ``IntervalIndex`` will have ``periods`` linearly spaced elements between ``start`` and ``end``, inclusively. To learn more about datetime-like frequency strings, please see `this link <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__. Examples -------- Numeric ``start`` and ``end`` is supported. >>> pd.interval_range(start=0, end=5) IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]], closed='right', dtype='interval[int64]') Additionally, datetime-like input is also supported. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... end=pd.Timestamp('2017-01-04')) IntervalIndex([(2017-01-01, 2017-01-02], (2017-01-02, 2017-01-03], (2017-01-03, 2017-01-04]], closed='right', dtype='interval[datetime64[ns]]') The ``freq`` parameter specifies the frequency between the left and right. endpoints of the individual intervals within the ``IntervalIndex``. For numeric ``start`` and ``end``, the frequency must also be numeric. >>> pd.interval_range(start=0, periods=4, freq=1.5) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') Similarly, for datetime-like ``start`` and ``end``, the frequency must be convertible to a DateOffset. >>> pd.interval_range(start=pd.Timestamp('2017-01-01'), ... periods=3, freq='MS') IntervalIndex([(2017-01-01, 2017-02-01], (2017-02-01, 2017-03-01], (2017-03-01, 2017-04-01]], closed='right', dtype='interval[datetime64[ns]]') Specify ``start``, ``end``, and ``periods``; the frequency is generated automatically (linearly spaced). >>> pd.interval_range(start=0, end=6, periods=4) IntervalIndex([(0.0, 1.5], (1.5, 3.0], (3.0, 4.5], (4.5, 6.0]], closed='right', dtype='interval[float64]') The ``closed`` parameter specifies which endpoints of the individual intervals within the ``IntervalIndex`` are closed. >>> pd.interval_range(end=5, periods=4, closed='both') IntervalIndex([[1, 2], [2, 3], [3, 4], [4, 5]], closed='both', dtype='interval[int64]') """
start = com.maybe_box_datetimelike(start) end = com.maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com._any_none(periods, start, end): freq = 1 if is_number(endpoint) else 'D' if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, and ' 'freq, exactly three must be specified') if not _is_valid_endpoint(start): msg = 'start must be numeric or datetime-like, got {start}' raise ValueError(msg.format(start=start)) elif not _is_valid_endpoint(end): msg = 'end must be numeric or datetime-like, got {end}' raise ValueError(msg.format(end=end)) if is_float(periods): periods = int(periods) elif not is_integer(periods) and periods is not None: msg = 'periods must be a number, got {periods}' raise TypeError(msg.format(periods=periods)) if freq is not None and not is_number(freq): try: freq = to_offset(freq) except ValueError: raise ValueError('freq must be numeric or convertible to ' 'DateOffset, got {freq}'.format(freq=freq)) # verify type compatibility if not all([_is_type_compatible(start, end), _is_type_compatible(start, freq), _is_type_compatible(end, freq)]): raise TypeError("start, end, freq need to be type compatible") # +1 to convert interval count to breaks count (n breaks = n-1 intervals) if periods is not None: periods += 1 if is_number(endpoint): # force consistency between start/end/freq (lower end if freq skips it) if com._all_not_none(start, end, freq): end -= (end - start) % freq # compute the period/start/end if unspecified (at most one) if periods is None: periods = int((end - start) // freq) + 1 elif start is None: start = end - (periods - 1) * freq elif end is None: end = start + (periods - 1) * freq breaks = np.linspace(start, end, periods) if all(is_integer(x) for x in com._not_none(start, end, freq)): # np.linspace always produces float output breaks = maybe_downcast_to_dtype(breaks, 'int64') else: # delegate to the appropriate range function if isinstance(endpoint, Timestamp): range_func = date_range else: range_func = timedelta_range breaks = range_func(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed)
<SYSTEM_TASK:> Create the writer & save <END_TASK> <USER_TASK:> Description: def save(self): """ Create the writer & save """
# GH21227 internal compression is not used when file-like passed. if self.compression and hasattr(self.path_or_buf, 'write'): msg = ("compression has no effect when passing file-like " "object as input.") warnings.warn(msg, RuntimeWarning, stacklevel=2) # when zip compression is called. is_zip = isinstance(self.path_or_buf, ZipFile) or ( not hasattr(self.path_or_buf, 'write') and self.compression == 'zip') if is_zip: # zipfile doesn't support writing string to archive. uses string # buffer to receive csv writing and dump into zip compression # file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, 'write'): f = self.path_or_buf close = False else: f, handles = _get_handle(self.path_or_buf, self.mode, encoding=self.encoding, compression=self.compression) close = True try: writer_kwargs = dict(lineterminator=self.line_terminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar) if self.encoding == 'ascii': self.writer = csvlib.writer(f, **writer_kwargs) else: writer_kwargs['encoding'] = self.encoding self.writer = UnicodeWriter(f, **writer_kwargs) self._save() finally: if is_zip: # GH17778 handles zip compression separately. buf = f.getvalue() if hasattr(self.path_or_buf, 'write'): self.path_or_buf.write(buf) else: f, handles = _get_handle(self.path_or_buf, self.mode, encoding=self.encoding, compression=self.compression) f.write(buf) close = True if close: f.close() for _fh in handles: _fh.close()
<SYSTEM_TASK:> Add delegated names to a class using a class decorator. This provides <END_TASK> <USER_TASK:> Description: def delegate_names(delegate, accessors, typ, overwrite=False): """ Add delegated names to a class using a class decorator. This provides an alternative usage to directly calling `_add_delegate_accessors` below a class definition. Parameters ---------- delegate : object the class to get methods/properties & doc-strings accessors : Sequence[str] List of accessor to add typ : {'property', 'method'} overwrite : boolean, default False overwrite the method/property in the target class if it exists Returns ------- callable A class decorator. Examples -------- @delegate_names(Categorical, ["categories", "ordered"], "property") class CategoricalAccessor(PandasDelegate): [...] """
def add_delegate_accessors(cls): cls._add_delegate_accessors(delegate, accessors, typ, overwrite=overwrite) return cls return add_delegate_accessors
<SYSTEM_TASK:> Add accessors to cls from the delegate class. <END_TASK> <USER_TASK:> Description: def _add_delegate_accessors(cls, delegate, accessors, typ, overwrite=False): """ Add accessors to cls from the delegate class. Parameters ---------- cls : the class to add the methods/properties to delegate : the class to get methods/properties & doc-strings accessors : string list of accessors to add typ : 'property' or 'method' overwrite : boolean, default False overwrite the method/property in the target class if it exists. """
def _create_delegator_property(name): def _getter(self): return self._delegate_property_get(name) def _setter(self, new_values): return self._delegate_property_set(name, new_values) _getter.__name__ = name _setter.__name__ = name return property(fget=_getter, fset=_setter, doc=getattr(delegate, name).__doc__) def _create_delegator_method(name): def f(self, *args, **kwargs): return self._delegate_method(name, *args, **kwargs) f.__name__ = name f.__doc__ = getattr(delegate, name).__doc__ return f for name in accessors: if typ == 'property': f = _create_delegator_property(name) else: f = _create_delegator_method(name) # don't overwrite existing methods/properties if overwrite or not hasattr(cls, name): setattr(cls, name, f)
<SYSTEM_TASK:> return a boolean if we WILL be using numexpr <END_TASK> <USER_TASK:> Description: def _can_use_numexpr(op, op_str, a, b, dtype_check): """ return a boolean if we WILL be using numexpr """
if op_str is not None: # required min elements (otherwise we are adding overhead) if np.prod(a.shape) > _MIN_ELEMENTS: # check for dtype compatibility dtypes = set() for o in [a, b]: if hasattr(o, 'get_dtype_counts'): s = o.get_dtype_counts() if len(s) > 1: return False dtypes |= set(s.index) elif isinstance(o, np.ndarray): dtypes |= {o.dtype.name} # allowed are a superset if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: return True return False
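Roughly, a sketch assuming the historical _MIN_ELEMENTS threshold of 10,000 and that float64 belongs to the allowed dtype set for the 'evaluate' check:
import operator
import numpy as np

small = np.arange(100, dtype='float64')
big = np.arange(1_000_000, dtype='float64')
_can_use_numexpr(operator.add, '+', small, small, 'evaluate')  # False: too few elements to pay off
_can_use_numexpr(operator.add, '+', big, big, 'evaluate')      # True: large, homogeneous float64 operands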
<SYSTEM_TASK:> evaluate and return the expression of the op on a and b <END_TASK> <USER_TASK:> Description: def evaluate(op, op_str, a, b, use_numexpr=True, **eval_kwargs): """ evaluate and return the expression of the op on a and b Parameters ---------- op : the actual operand op_str: the string version of the op a : left operand b : right operand use_numexpr : whether to try to use numexpr (default True) """
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b) if use_numexpr: return _evaluate(op, op_str, a, b, **eval_kwargs) return _evaluate_standard(op, op_str, a, b)
<SYSTEM_TASK:> evaluate the where condition cond on a and b <END_TASK> <USER_TASK:> Description: def where(cond, a, b, use_numexpr=True): """ evaluate the where condition cond on a and b Parameters ---------- cond : a boolean array a : return if cond is True b : return if cond is False use_numexpr : whether to try to use numexpr (default True) """
if use_numexpr: return _where(cond, a, b) return _where_standard(cond, a, b)
<SYSTEM_TASK:> Load a feather-format object from the file path <END_TASK> <USER_TASK:> Description: def read_feather(path, columns=None, use_threads=True): """ Load a feather-format object from the file path .. versionadded 0.20.0 Parameters ---------- path : string file path, or file-like object columns : sequence, default None If not provided, all columns are read .. versionadded 0.24.0 nthreads : int, default 1 Number of CPU threads to use when reading to pandas.DataFrame .. versionadded 0.21.0 .. deprecated 0.24.0 use_threads : bool, default True Whether to parallelize reading using multiple threads .. versionadded 0.24.0 Returns ------- type of object stored in file """
feather, pyarrow = _try_import() path = _stringify_path(path) if LooseVersion(pyarrow.__version__) < LooseVersion('0.11.0'): int_use_threads = int(use_threads) if int_use_threads < 1: int_use_threads = 1 return feather.read_feather(path, columns=columns, nthreads=int_use_threads) return feather.read_feather(path, columns=columns, use_threads=bool(use_threads))
<SYSTEM_TASK:> Generate a range of dates with the spans between dates described by <END_TASK> <USER_TASK:> Description: def generate_regular_range(start, end, periods, freq): """ Generate a range of dates with the spans between dates described by the given `freq` DateOffset. Parameters ---------- start : Timestamp or None first point of produced date range end : Timestamp or None last point of produced date range periods : int number of periods in produced date range freq : DateOffset describes space between dates in produced date range Returns ------- ndarray[np.int64] representing nanosecond unix timestamps """
if isinstance(freq, Tick): stride = freq.nanos if periods is None: b = Timestamp(start).value # cannot just use e = Timestamp(end) + 1 because arange breaks when # stride is too large, see GH10887 e = (b + (Timestamp(end).value - b) // stride * stride + stride // 2 + 1) # end.tz == start.tz by this point due to _generate implementation tz = start.tz elif start is not None: b = Timestamp(start).value e = _generate_range_overflow_safe(b, periods, stride, side='start') tz = start.tz elif end is not None: e = Timestamp(end).value + stride b = _generate_range_overflow_safe(e, periods, stride, side='end') tz = end.tz else: raise ValueError("at least 'start' or 'end' should be specified " "if a 'period' is given.") with np.errstate(over="raise"): # If the range is sufficiently large, np.arange may overflow # and incorrectly return an empty array if not caught. try: values = np.arange(b, e, stride, dtype=np.int64) except FloatingPointError: xdr = [b] while xdr[-1] != e: xdr.append(xdr[-1] + stride) values = np.array(xdr[:-1], dtype=np.int64) else: tz = None # start and end should have the same timezone by this point if start is not None: tz = start.tz elif end is not None: tz = end.tz xdr = generate_range(start=start, end=end, periods=periods, offset=freq) values = np.array([x.value for x in xdr], dtype=np.int64) return values, tz
<SYSTEM_TASK:> Calculate the second endpoint for passing to np.arange, checking <END_TASK> <USER_TASK:> Description: def _generate_range_overflow_safe(endpoint, periods, stride, side='start'): """ Calculate the second endpoint for passing to np.arange, checking to avoid an integer overflow. Catch OverflowError and re-raise as OutOfBoundsDatetime. Parameters ---------- endpoint : int nanosecond timestamp of the known endpoint of the desired range periods : int number of periods in the desired range stride : int nanoseconds between periods in the desired range side : {'start', 'end'} which end of the range `endpoint` refers to Returns ------- other_end : int Raises ------ OutOfBoundsDatetime """
# GH#14187 raise instead of incorrectly wrapping around assert side in ['start', 'end'] i64max = np.uint64(np.iinfo(np.int64).max) msg = ('Cannot generate range with {side}={endpoint} and ' 'periods={periods}' .format(side=side, endpoint=endpoint, periods=periods)) with np.errstate(over="raise"): # if periods * strides cannot be multiplied within the *uint64* bounds, # we cannot salvage the operation by recursing, so raise try: addend = np.uint64(periods) * np.uint64(np.abs(stride)) except FloatingPointError: raise OutOfBoundsDatetime(msg) if np.abs(addend) <= i64max: # relatively easy case without casting concerns return _generate_range_overflow_safe_signed( endpoint, periods, stride, side) elif ((endpoint > 0 and side == 'start' and stride > 0) or (endpoint < 0 and side == 'end' and stride > 0)): # no chance of not-overflowing raise OutOfBoundsDatetime(msg) elif (side == 'end' and endpoint > i64max and endpoint - stride <= i64max): # in _generate_regular_range we added `stride` thereby overflowing # the bounds. Adjust to fix this. return _generate_range_overflow_safe(endpoint - stride, periods - 1, stride, side) # split into smaller pieces mid_periods = periods // 2 remaining = periods - mid_periods assert 0 < remaining < periods, (remaining, periods, endpoint, stride) midpoint = _generate_range_overflow_safe(endpoint, mid_periods, stride, side) return _generate_range_overflow_safe(midpoint, remaining, stride, side)
<SYSTEM_TASK:> Context manager for temporarily setting a locale. <END_TASK> <USER_TASK:> Description: def set_locale(new_locale, lc_var=locale.LC_ALL): """ Context manager for temporarily setting a locale. Parameters ---------- new_locale : str or tuple A string of the form <language_country>.<encoding>. For example to set the current locale to US English with a UTF8 encoding, you would pass "en_US.UTF-8". lc_var : int, default `locale.LC_ALL` The category of the locale being set. Notes ----- This is useful when you want to run a particular block of code under a particular locale, without globally setting the locale. This probably isn't thread-safe. """
current_locale = locale.getlocale() try: locale.setlocale(lc_var, new_locale) normalized_locale = locale.getlocale() if all(x is not None for x in normalized_locale): yield '.'.join(normalized_locale) else: yield new_locale finally: locale.setlocale(lc_var, current_locale)
<SYSTEM_TASK:> Check to see if we can set a locale, and subsequently get the locale, <END_TASK> <USER_TASK:> Description: def can_set_locale(lc, lc_var=locale.LC_ALL): """ Check to see if we can set a locale, and subsequently get the locale, without raising an Exception. Parameters ---------- lc : str The locale to attempt to set. lc_var : int, default `locale.LC_ALL` The category of the locale being set. Returns ------- is_valid : bool Whether the passed locale can be set """
try: with set_locale(lc, lc_var=lc_var): pass except (ValueError, locale.Error): # horrible name for an Exception subclass return False else: return True
<SYSTEM_TASK:> Return a list of normalized locales that do not throw an ``Exception`` <END_TASK> <USER_TASK:> Description: def _valid_locales(locales, normalize): """ Return a list of normalized locales that do not throw an ``Exception`` when set. Parameters ---------- locales : list of str The locale names to validate. normalize : bool Whether to call ``locale.normalize`` on each locale. Returns ------- valid_locales : list A list of valid locales. """
if normalize: normalizer = lambda x: locale.normalize(x.strip()) else: normalizer = lambda x: x.strip() return list(filter(can_set_locale, map(normalizer, locales)))
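For example, on a system where en_US.UTF-8 is installed but the second name is bogus:
_valid_locales(['en_US.UTF-8', 'xx_XX.FAKE'], normalize=False)
# -> ['en_US.UTF-8']: only locales that can actually be set survive the filter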
<SYSTEM_TASK:> Get all the locales that are available on the system. <END_TASK> <USER_TASK:> Description: def get_locales(prefix=None, normalize=True, locale_getter=_default_locale_getter): """ Get all the locales that are available on the system. Parameters ---------- prefix : str If not ``None`` then return only those locales with the prefix provided. For example to get all English language locales (those that start with ``"en"``), pass ``prefix="en"``. normalize : bool Call ``locale.normalize`` on the resulting list of available locales. If ``True``, only locales that can be set without throwing an ``Exception`` are returned. locale_getter : callable The function to use to retrieve the current locales. This should return a string with each locale separated by a newline character. Returns ------- locales : list of strings A list of locale strings that can be set with ``locale.setlocale()``. For example:: locale.setlocale(locale.LC_ALL, locale_string) On error will return None (no locale available, e.g. Windows) """
try: raw_locales = locale_getter() except Exception: return None try: # raw_locales is "\n" separated list of locales # it may contain non-decodable parts, so split # extract what we can and then rejoin. raw_locales = raw_locales.split(b'\n') out_locales = [] for x in raw_locales: out_locales.append(str( x, encoding=options.display.encoding)) except TypeError: pass if prefix is None: return _valid_locales(out_locales, normalize) pattern = re.compile('{prefix}.*'.format(prefix=prefix)) found = pattern.findall('\n'.join(out_locales)) return _valid_locales(found, normalize)
<SYSTEM_TASK:> Ensure that an array object has a float dtype if possible. <END_TASK> <USER_TASK:> Description: def ensure_float(arr): """ Ensure that an array object has a float dtype if possible. Parameters ---------- arr : array-like The array whose data type we want to enforce as float. Returns ------- float_arr : The original array cast to the float dtype if possible. Otherwise, the original array is returned. """
if issubclass(arr.dtype.type, (np.integer, np.bool_)): arr = arr.astype(float) return arr
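For example (assuming the helper above is in scope):
import numpy as np
ensure_float(np.array([1, 2, 3])).dtype      # float64: integers are cast
ensure_float(np.array([True, False])).dtype  # float64: booleans are cast
ensure_float(np.array([1.5, 2.5])).dtype     # float64: already float, returned unchanged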
<SYSTEM_TASK:> Ensure that an array of some integer dtype <END_TASK> <USER_TASK:> Description: def ensure_int64_or_float64(arr, copy=False): """ Ensure that an array of some integer dtype has an int64 dtype if possible. If it's not possible, potentially because of overflow, convert the array to float64 instead. Parameters ---------- arr : array-like The array whose data type we want to enforce. copy : boolean Whether to copy the original array or reuse it in place, if possible. Returns ------- out_arr : The input array cast as int64 if possible without overflow. Otherwise the input array cast to float64. """
try: return arr.astype('int64', copy=copy, casting='safe') except TypeError: return arr.astype('float64', copy=copy)
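For example, numpy's 'safe' casting rule is what decides between the two outcomes:
import numpy as np
ensure_int64_or_float64(np.array([1, 2], dtype='int32')).dtype   # int64: the safe cast succeeds
ensure_int64_or_float64(np.array([1, 2], dtype='uint64')).dtype  # float64: uint64 cannot be safely cast to int64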
<SYSTEM_TASK:> evaluate if the tipo is a subclass of the klasses <END_TASK> <USER_TASK:> Description: def classes_and_not_datetimelike(*klasses): """ evaluate if the tipo is a subclass of the klasses and not a datetimelike """
return lambda tipo: (issubclass(tipo, klasses) and not issubclass(tipo, (np.datetime64, np.timedelta64)))
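The extra datetimelike exclusion matters because np.timedelta64 is a subclass of np.signedinteger; a short sketch assuming the helper above is in scope:
import numpy as np
is_integer_type = classes_and_not_datetimelike(np.integer)
is_integer_type(np.int64)        # True
is_integer_type(np.timedelta64)  # False, even though issubclass(np.timedelta64, np.integer) is True
is_integer_type(np.float64)      # False: not an integer subclass at all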
<SYSTEM_TASK:> Check whether an array-like is a 1-D pandas sparse array. <END_TASK> <USER_TASK:> Description: def is_sparse(arr): """ Check whether an array-like is a 1-D pandas sparse array. Check that the one-dimensional array-like is a pandas sparse array. Returns True if it is a pandas sparse array, not another type of sparse array. Parameters ---------- arr : array-like Array-like to check. Returns ------- bool Whether or not the array-like is a pandas sparse array. See Also -------- DataFrame.to_sparse : Convert DataFrame to a SparseDataFrame. Series.to_sparse : Convert Series to SparseSeries. Series.to_dense : Return dense representation of a Series. Examples -------- Returns `True` if the parameter is a 1-D pandas sparse array. >>> is_sparse(pd.SparseArray([0, 0, 1, 0])) True >>> is_sparse(pd.SparseSeries([0, 0, 1, 0])) True Returns `False` if the parameter is not sparse. >>> is_sparse(np.array([0, 0, 1, 0])) False >>> is_sparse(pd.Series([0, 1, 0, 0])) False Returns `False` if the parameter is not a pandas sparse array. >>> from scipy.sparse import bsr_matrix >>> is_sparse(bsr_matrix([0, 1, 0, 0])) False Returns `False` if the parameter has more than one dimension. >>> df = pd.SparseDataFrame([389., 24., 80.5, np.nan], columns=['max_speed'], index=['falcon', 'parrot', 'lion', 'monkey']) >>> is_sparse(df) False >>> is_sparse(df.max_speed) True """
from pandas.core.arrays.sparse import SparseDtype dtype = getattr(arr, 'dtype', arr) return isinstance(dtype, SparseDtype)
<SYSTEM_TASK:> Check whether an array-like is a scipy.sparse.spmatrix instance. <END_TASK> <USER_TASK:> Description: def is_scipy_sparse(arr): """ Check whether an array-like is a scipy.sparse.spmatrix instance. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a scipy.sparse.spmatrix instance. Notes ----- If scipy is not installed, this function will always return False. Examples -------- >>> from scipy.sparse import bsr_matrix >>> is_scipy_sparse(bsr_matrix([1, 2, 3])) True >>> is_scipy_sparse(pd.SparseArray([1, 2, 3])) False >>> is_scipy_sparse(pd.SparseSeries([1, 2, 3])) False """
global _is_scipy_sparse if _is_scipy_sparse is None: try: from scipy.sparse import issparse as _is_scipy_sparse except ImportError: _is_scipy_sparse = lambda _: False return _is_scipy_sparse(arr)
<SYSTEM_TASK:> Check if obj, or all elements of a list-like, is a DateOffset <END_TASK> <USER_TASK:> Description: def is_offsetlike(arr_or_obj): """ Check if obj, or all elements of a list-like, is a DateOffset Parameters ---------- arr_or_obj : object Returns ------- boolean Whether the object is a DateOffset or a list-like of DateOffsets Examples -------- >>> is_offsetlike(pd.DateOffset(days=1)) True >>> is_offsetlike('offset') False >>> is_offsetlike([pd.offsets.Minute(4), pd.offsets.MonthEnd()]) True >>> is_offsetlike(np.array([pd.DateOffset(months=3), pd.Timestamp.now()])) False """
if isinstance(arr_or_obj, ABCDateOffset): return True elif (is_list_like(arr_or_obj) and len(arr_or_obj) and is_object_dtype(arr_or_obj)): return all(isinstance(x, ABCDateOffset) for x in arr_or_obj) return False
<SYSTEM_TASK:> Check whether an array-like is a periodical index. <END_TASK> <USER_TASK:> Description: def is_period(arr): """ Check whether an array-like is a periodical index. .. deprecated:: 0.24.0 Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a periodical index. Examples -------- >>> is_period([1, 2, 3]) False >>> is_period(pd.Index([1, 2, 3])) False >>> is_period(pd.PeriodIndex(["2017-01-01"], freq="D")) True """
warnings.warn("'is_period' is deprecated and will be removed in a future " "version. Use 'is_period_dtype' or is_period_arraylike' " "instead.", FutureWarning, stacklevel=2) return isinstance(arr, ABCPeriodIndex) or is_period_arraylike(arr)
<SYSTEM_TASK:> Check whether the provided array or dtype is of the string dtype. <END_TASK> <USER_TASK:> Description: def is_string_dtype(arr_or_dtype): """ Check whether the provided array or dtype is of the string dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of the string dtype. Examples -------- >>> is_string_dtype(str) True >>> is_string_dtype(object) True >>> is_string_dtype(int) False >>> >>> is_string_dtype(np.array(['a', 'b'])) True >>> is_string_dtype(pd.Series([1, 2])) False """
# TODO: gh-15585: consider making the checks stricter. def condition(dtype): return dtype.kind in ('O', 'S', 'U') and not is_period_dtype(dtype) return _is_dtype(arr_or_dtype, condition)
<SYSTEM_TASK:> Check whether an array-like is a periodical array-like or PeriodIndex. <END_TASK> <USER_TASK:> Description: def is_period_arraylike(arr): """ Check whether an array-like is a periodical array-like or PeriodIndex. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a periodical array-like or PeriodIndex instance. Examples -------- >>> is_period_arraylike([1, 2, 3]) False >>> is_period_arraylike(pd.Index([1, 2, 3])) False >>> is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D")) True """
if isinstance(arr, (ABCPeriodIndex, ABCPeriodArray)): return True elif isinstance(arr, (np.ndarray, ABCSeries)): return is_period_dtype(arr.dtype) return getattr(arr, 'inferred_type', None) == 'period'
<SYSTEM_TASK:> Check whether an array-like is a datetime array-like or DatetimeIndex. <END_TASK> <USER_TASK:> Description: def is_datetime_arraylike(arr): """ Check whether an array-like is a datetime array-like or DatetimeIndex. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a datetime array-like or DatetimeIndex. Examples -------- >>> is_datetime_arraylike([1, 2, 3]) False >>> is_datetime_arraylike(pd.Index([1, 2, 3])) False >>> is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3])) True """
if isinstance(arr, ABCDatetimeIndex): return True elif isinstance(arr, (np.ndarray, ABCSeries)): return (is_object_dtype(arr.dtype) and lib.infer_dtype(arr, skipna=False) == 'datetime') return getattr(arr, 'inferred_type', None) == 'datetime'
<SYSTEM_TASK:> Check whether an array-like is a datetime-like array-like. <END_TASK> <USER_TASK:> Description: def is_datetimelike(arr): """ Check whether an array-like is a datetime-like array-like. Acceptable datetime-like objects are (but not limited to) datetime indices, periodic indices, and timedelta indices. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is a datetime-like array-like. Examples -------- >>> is_datetimelike([1, 2, 3]) False >>> is_datetimelike(pd.Index([1, 2, 3])) False >>> is_datetimelike(pd.DatetimeIndex([1, 2, 3])) True >>> is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> is_datetimelike(pd.PeriodIndex([], freq="A")) True >>> is_datetimelike(np.array([], dtype=np.datetime64)) True >>> is_datetimelike(pd.Series([], dtype="timedelta64[ns]")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_datetimelike(s) True """
return (is_datetime64_dtype(arr) or is_datetime64tz_dtype(arr) or is_timedelta64_dtype(arr) or isinstance(arr, ABCPeriodIndex))
<SYSTEM_TASK:> Check if two dtypes are equal. <END_TASK> <USER_TASK:> Description: def is_dtype_equal(source, target): """ Check if two dtypes are equal. Parameters ---------- source : The first dtype to compare target : The second dtype to compare Returns ---------- boolean Whether or not the two dtypes are equal. Examples -------- >>> is_dtype_equal(int, float) False >>> is_dtype_equal("int", int) True >>> is_dtype_equal(object, "category") False >>> is_dtype_equal(CategoricalDtype(), "category") True >>> is_dtype_equal(DatetimeTZDtype(), "datetime64") False """
try: source = _get_dtype(source) target = _get_dtype(target) return source == target except (TypeError, AttributeError): # invalid comparison # object == category will hit this return False
<SYSTEM_TASK:> Check whether two arrays have compatible dtypes to do a union. <END_TASK> <USER_TASK:> Description: def is_dtype_union_equal(source, target): """ Check whether two arrays have compatible dtypes to do a union. numpy types are checked with ``is_dtype_equal``. Extension types are checked separately. Parameters ---------- source : The first dtype to compare target : The second dtype to compare Returns ------- boolean Whether or not the two dtypes are compatible for a union. Examples -------- >>> is_dtype_union_equal("int", int) True >>> is_dtype_union_equal(CategoricalDtype(['a', 'b']), ... CategoricalDtype(['b', 'c'])) True >>> is_dtype_union_equal(CategoricalDtype(['a', 'b']), ... CategoricalDtype(['b', 'c'], ordered=True)) False """
source = _get_dtype(source) target = _get_dtype(target) if is_categorical_dtype(source) and is_categorical_dtype(target): # ordered False for both return source.ordered is target.ordered return is_dtype_equal(source, target)
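A quick illustration of the categorical branch above, using only the public pandas API: two unordered categorical dtypes with different categories are not equal, yet they are union-compatible because their `ordered` flags match.

import pandas as pd

a = pd.CategoricalDtype(['a', 'b'])
b = pd.CategoricalDtype(['b', 'c'])
print(a == b)                   # False: different categories
print(a.ordered is b.ordered)   # True: both unordered, so a union is allowed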
<SYSTEM_TASK:> Check if we are comparing a string-like object to a numeric ndarray. <END_TASK> <USER_TASK:> Description: def is_numeric_v_string_like(a, b): """ Check if we are comparing a string-like object to a numeric ndarray. NumPy doesn't like to compare such objects, especially numeric arrays and scalar string-likes. Parameters ---------- a : array-like, scalar The first object to check. b : array-like, scalar The second object to check. Returns ------- boolean Whether we return a comparing a string-like object to a numeric array. Examples -------- >>> is_numeric_v_string_like(1, 1) False >>> is_numeric_v_string_like("foo", "foo") False >>> is_numeric_v_string_like(1, "foo") # non-array numeric False >>> is_numeric_v_string_like(np.array([1]), "foo") True >>> is_numeric_v_string_like("foo", np.array([1])) # symmetric check True >>> is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"])) True >>> is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2])) True >>> is_numeric_v_string_like(np.array([1]), np.array([2])) False >>> is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"])) False """
is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) is_a_numeric_array = is_a_array and is_numeric_dtype(a) is_b_numeric_array = is_b_array and is_numeric_dtype(b) is_a_string_array = is_a_array and is_string_like_dtype(a) is_b_string_array = is_b_array and is_string_like_dtype(b) is_a_scalar_string_like = not is_a_array and is_string_like(a) is_b_scalar_string_like = not is_b_array and is_string_like(b) return ((is_a_numeric_array and is_b_scalar_string_like) or (is_b_numeric_array and is_a_scalar_string_like) or (is_a_numeric_array and is_b_string_array) or (is_b_numeric_array and is_a_string_array))
<SYSTEM_TASK:> Check if we are comparing a datetime-like object to a numeric object. <END_TASK> <USER_TASK:> Description: def is_datetimelike_v_numeric(a, b): """ Check if we are comparing a datetime-like object to a numeric object. By "numeric," we mean an object that is either of an int or float dtype. Parameters ---------- a : array-like, scalar The first object to check. b : array-like, scalar The second object to check. Returns ------- boolean Whether we return a comparing a datetime-like to a numeric object. Examples -------- >>> dt = np.datetime64(pd.datetime(2017, 1, 1)) >>> >>> is_datetimelike_v_numeric(1, 1) False >>> is_datetimelike_v_numeric(dt, dt) False >>> is_datetimelike_v_numeric(1, dt) True >>> is_datetimelike_v_numeric(dt, 1) # symmetric check True >>> is_datetimelike_v_numeric(np.array([dt]), 1) True >>> is_datetimelike_v_numeric(np.array([1]), dt) True >>> is_datetimelike_v_numeric(np.array([dt]), np.array([1])) True >>> is_datetimelike_v_numeric(np.array([1]), np.array([2])) False >>> is_datetimelike_v_numeric(np.array([dt]), np.array([dt])) False """
if not hasattr(a, 'dtype'): a = np.asarray(a) if not hasattr(b, 'dtype'): b = np.asarray(b) def is_numeric(x): """ Check if an object has a numeric dtype (i.e. integer or float). """ return is_integer_dtype(x) or is_float_dtype(x) is_datetimelike = needs_i8_conversion return ((is_datetimelike(a) and is_numeric(b)) or (is_datetimelike(b) and is_numeric(a)))
<SYSTEM_TASK:> Check if we are comparing a datetime-like object to an object instance. <END_TASK> <USER_TASK:> Description: def is_datetimelike_v_object(a, b): """ Check if we are comparing a datetime-like object to an object instance. Parameters ---------- a : array-like, scalar The first object to check. b : array-like, scalar The second object to check. Returns ------- boolean Whether we return a comparing a datetime-like to an object instance. Examples -------- >>> obj = object() >>> dt = np.datetime64(pd.datetime(2017, 1, 1)) >>> >>> is_datetimelike_v_object(obj, obj) False >>> is_datetimelike_v_object(dt, dt) False >>> is_datetimelike_v_object(obj, dt) True >>> is_datetimelike_v_object(dt, obj) # symmetric check True >>> is_datetimelike_v_object(np.array([dt]), obj) True >>> is_datetimelike_v_object(np.array([obj]), dt) True >>> is_datetimelike_v_object(np.array([dt]), np.array([obj])) True >>> is_datetimelike_v_object(np.array([obj]), np.array([obj])) False >>> is_datetimelike_v_object(np.array([dt]), np.array([1])) False >>> is_datetimelike_v_object(np.array([dt]), np.array([dt])) False """
if not hasattr(a, 'dtype'): a = np.asarray(a) if not hasattr(b, 'dtype'): b = np.asarray(b) is_datetimelike = needs_i8_conversion return ((is_datetimelike(a) and is_object_dtype(b)) or (is_datetimelike(b) and is_object_dtype(a)))
<SYSTEM_TASK:> Check whether the array or dtype should be converted to int64. <END_TASK> <USER_TASK:> Description: def needs_i8_conversion(arr_or_dtype): """ Check whether the array or dtype should be converted to int64. An array-like or dtype "needs" such a conversion if the array-like or dtype is of a datetime-like dtype Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype should be converted to int64. Examples -------- >>> needs_i8_conversion(str) False >>> needs_i8_conversion(np.int64) False >>> needs_i8_conversion(np.datetime64) True >>> needs_i8_conversion(np.array(['a', 'b'])) False >>> needs_i8_conversion(pd.Series([1, 2])) False >>> needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]")) True >>> needs_i8_conversion(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True """
if arr_or_dtype is None: return False return (is_datetime_or_timedelta_dtype(arr_or_dtype) or is_datetime64tz_dtype(arr_or_dtype) or is_period_dtype(arr_or_dtype))
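For intuition about the int64 ("i8") representation the conversion targets, here is a NumPy-only sketch: datetime64[ns] values view directly as nanoseconds since the epoch.

import numpy as np

stamps = np.array(['1970-01-01', '1970-01-02'], dtype='datetime64[ns]')
print(stamps.view('int64'))  # [0, 86400000000000] -- the underlying i8 values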
<SYSTEM_TASK:> Check whether the provided array or dtype is of a boolean dtype. <END_TASK> <USER_TASK:> Description: def is_bool_dtype(arr_or_dtype): """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.SparseArray([True, False])) True """
if arr_or_dtype is None: return False try: dtype = _get_dtype(arr_or_dtype) except TypeError: return False if isinstance(arr_or_dtype, CategoricalDtype): arr_or_dtype = arr_or_dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndexClass): # TODO(jreback) # we don't have a boolean Index class # so its object, we need to infer to # guess this return (arr_or_dtype.is_object and arr_or_dtype.inferred_type == 'boolean') elif is_extension_array_dtype(arr_or_dtype): dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) return dtype._is_boolean return issubclass(dtype.type, np.bool_)
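To see why the Index special case above is needed: in the pandas versions this code targets, an Index built from booleans is object-dtyped, so the boolean nature has to be inferred (newer pandas may report a bool dtype instead).

import pandas as pd

idx = pd.Index([True, False, True])
print(idx.dtype)           # object on older pandas (newer versions may show bool)
print(idx.inferred_type)   # 'boolean'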
<SYSTEM_TASK:> Check whether an array-like is of a pandas extension class instance. <END_TASK> <USER_TASK:> Description: def is_extension_type(arr): """ Check whether an array-like is of a pandas extension class instance. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external to it like scipy sparse matrices), and datetime-like arrays. Parameters ---------- arr : array-like The array-like to check. Returns ------- boolean Whether or not the array-like is of a pandas extension class instance. Examples -------- >>> is_extension_type([1, 2, 3]) False >>> is_extension_type(np.array([1, 2, 3])) False >>> >>> cat = pd.Categorical([1, 2, 3]) >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) True >>> is_extension_type(pd.SparseArray([1, 2, 3])) True >>> is_extension_type(pd.SparseSeries([1, 2, 3])) True >>> >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True """
if is_categorical(arr): return True elif is_sparse(arr): return True elif is_datetime64tz_dtype(arr): return True return False
<SYSTEM_TASK:> Check if an object is a pandas extension array type. <END_TASK> <USER_TASK:> Description: def is_extension_array_dtype(arr_or_dtype): """ Check if an object is a pandas extension array type. See the :ref:`Use Guide <extending.extension-types>` for more. Parameters ---------- arr_or_dtype : object For array-like input, the ``.dtype`` attribute will be extracted. Returns ------- bool Whether the `arr_or_dtype` is an extension array type. Notes ----- This checks whether an object implements the pandas extension array interface. In pandas, this includes: * Categorical * Sparse * Interval * Period * DatetimeArray * TimedeltaArray Third-party libraries may implement arrays or types satisfying this interface as well. Examples -------- >>> from pandas.api.types import is_extension_array_dtype >>> arr = pd.Categorical(['a', 'b']) >>> is_extension_array_dtype(arr) True >>> is_extension_array_dtype(arr.dtype) True >>> arr = np.array(['a', 'b']) >>> is_extension_array_dtype(arr.dtype) False """
dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) return (isinstance(dtype, ExtensionDtype) or registry.find(dtype) is not None)
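A brief usage sketch of the two-step check above (ExtensionDtype instance or registry lookup), via the public API:

import numpy as np
import pandas as pd
from pandas.api.types import is_extension_array_dtype

print(is_extension_array_dtype(pd.Categorical(['a', 'b'])))  # True: .dtype is a CategoricalDtype
print(is_extension_array_dtype('category'))                  # True: resolved through the registry
print(is_extension_array_dtype(np.dtype('int64')))           # False: plain numpy dtype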
<SYSTEM_TASK:> Get the dtype instance associated with an array <END_TASK> <USER_TASK:> Description: def _get_dtype(arr_or_dtype): """ Get the dtype instance associated with an array or dtype object. Parameters ---------- arr_or_dtype : array-like The array-like or dtype object whose dtype we want to extract. Returns ------- obj_dtype : The extract dtype instance from the passed in array or dtype object. Raises ------ TypeError : The passed in object is None. """
if arr_or_dtype is None: raise TypeError("Cannot deduce dtype from null object") # fastpath elif isinstance(arr_or_dtype, np.dtype): return arr_or_dtype elif isinstance(arr_or_dtype, type): return np.dtype(arr_or_dtype) # if we have an array-like elif hasattr(arr_or_dtype, 'dtype'): arr_or_dtype = arr_or_dtype.dtype return pandas_dtype(arr_or_dtype)
<SYSTEM_TASK:> Get a numpy dtype.type-style object for a dtype object. <END_TASK> <USER_TASK:> Description: def infer_dtype_from_object(dtype): """ Get a numpy dtype.type-style object for a dtype object. This methods also includes handling of the datetime64[ns] and datetime64[ns, TZ] objects. If no dtype can be found, we return ``object``. Parameters ---------- dtype : dtype, type The dtype object whose numpy dtype.type-style object we want to extract. Returns ------- dtype_object : The extracted numpy dtype.type-style object. """
if isinstance(dtype, type) and issubclass(dtype, np.generic): # Type object from a dtype return dtype elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)): # dtype object try: _validate_date_like_dtype(dtype) except TypeError: # Should still pass if we don't have a date-like pass return dtype.type try: dtype = pandas_dtype(dtype) except TypeError: pass if is_extension_array_dtype(dtype): return dtype.type elif isinstance(dtype, str): # TODO(jreback) # should deprecate these if dtype in ['datetimetz', 'datetime64tz']: return DatetimeTZDtype.type elif dtype in ['period']: raise NotImplementedError if dtype == 'datetime' or dtype == 'timedelta': dtype += '64' try: return infer_dtype_from_object(getattr(np, dtype)) except (AttributeError, TypeError): # Handles cases like _get_dtype(int) i.e., # Python objects that are valid dtypes # (unlike user-defined types, in general) # # TypeError handles the float16 type code of 'e' # further handle internal types pass return infer_dtype_from_object(np.dtype(dtype))
<SYSTEM_TASK:> Check whether the dtype is a date-like dtype. Raises an error if invalid. <END_TASK> <USER_TASK:> Description: def _validate_date_like_dtype(dtype): """ Check whether the dtype is a date-like dtype. Raises an error if invalid. Parameters ---------- dtype : dtype, type The dtype to check. Raises ------ TypeError : The dtype could not be cast to a date-like dtype. ValueError : The dtype is an illegal date-like dtype (e.g. the frequency provided is too specific) """
try: typ = np.datetime_data(dtype)[0] except ValueError as e: raise TypeError('{error}'.format(error=e)) if typ != 'generic' and typ != 'ns': msg = '{name!r} is too specific of a frequency, try passing {type!r}' raise ValueError(msg.format(name=dtype.name, type=dtype.type.__name__))
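The validation above hinges on `np.datetime_data`, which reports the unit of a datetime dtype; a quick illustrative check (not from the original source):

import numpy as np

print(np.datetime_data(np.dtype('datetime64[ns]')))  # ('ns', 1) -- accepted
print(np.datetime_data(np.dtype('datetime64[D]')))   # ('D', 1)  -- "too specific", would raise ValueError
print(np.datetime_data(np.dtype('datetime64')))      # ('generic', 1) -- also accepted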
<SYSTEM_TASK:> Convert input into a pandas only dtype object or a numpy dtype object. <END_TASK> <USER_TASK:> Description: def pandas_dtype(dtype): """ Convert input into a pandas only dtype object or a numpy dtype object. Parameters ---------- dtype : object to be converted Returns ------- np.dtype or a pandas dtype Raises ------ TypeError if not a dtype """
# short-circuit if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, PandasExtensionDtype, ExtensionDtype)): return dtype # registered extension types result = registry.find(dtype) if result is not None: return result # try a numpy dtype # raise a consistent TypeError if failed try: npdtype = np.dtype(dtype) except Exception: # we don't want to force a repr of the non-string if not isinstance(dtype, str): raise TypeError("data type not understood") raise TypeError("data type '{}' not understood".format( dtype)) # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. However, this will # also catch some valid dtypes such as object, np.object_ and 'object' # which we safeguard against by catching them earlier and returning # np.dtype(valid_dtype) before this condition is evaluated. if is_hashable(dtype) and dtype in [object, np.object_, 'object', 'O']: # check hashability to avoid errors/DeprecationWarning when we get # here and `dtype` is an array return npdtype elif npdtype.kind == 'O': raise TypeError("dtype '{}' not understood".format(dtype)) return npdtype
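A few examples of the conversions performed above, using the public `pandas.api.types.pandas_dtype` entry point:

from pandas.api.types import pandas_dtype

print(pandas_dtype('int64'))                       # dtype('int64'), via np.dtype
print(pandas_dtype('category'))                    # CategoricalDtype, via the extension registry
print(pandas_dtype('datetime64[ns, US/Eastern]'))  # DatetimeTZDtype, also via the registry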
<SYSTEM_TASK:> groupby & merge; we are always performing a left-by type operation <END_TASK> <USER_TASK:> Description: def _groupby_and_merge(by, on, left, right, _merge_pieces, check_duplicates=True): """ groupby & merge; we are always performing a left-by type operation Parameters ---------- by: field to group on: duplicates field left: left frame right: right frame _merge_pieces: function for merging check_duplicates: boolean, default True should we check & clean duplicates """
pieces = [] if not isinstance(by, (list, tuple)): by = [by] lby = left.groupby(by, sort=False) # if we can groupby the rhs # then we can get vastly better perf try: # we will check & remove duplicates if indicated if check_duplicates: if on is None: on = [] elif not isinstance(on, (list, tuple)): on = [on] if right.duplicated(by + on).any(): right = right.drop_duplicates(by + on, keep='last') rby = right.groupby(by, sort=False) except KeyError: rby = None for key, lhs in lby: if rby is None: rhs = right else: try: rhs = right.take(rby.indices[key]) except KeyError: # key doesn't exist in left lcols = lhs.columns.tolist() cols = lcols + [r for r in right.columns if r not in set(lcols)] merged = lhs.reindex(columns=cols) merged.index = range(len(merged)) pieces.append(merged) continue merged = _merge_pieces(lhs, rhs) # make sure join keys are in the merged # TODO, should _merge_pieces do this? for k in by: try: if k in merged: merged[k] = key except KeyError: pass pieces.append(merged) # preserve the original order # if we have a missing piece this can be reset from pandas.core.reshape.concat import concat result = concat(pieces, ignore_index=True) result = result.reindex(columns=pieces[0].columns, copy=False) return result, lby
<SYSTEM_TASK:> Perform an asof merge. This is similar to a left-join except that we <END_TASK> <USER_TASK:> Description: def merge_asof(left, right, on=None, left_on=None, right_on=None, left_index=False, right_index=False, by=None, left_by=None, right_by=None, suffixes=('_x', '_y'), tolerance=None, allow_exact_matches=True, direction='backward'): """Perform an asof merge. This is similar to a left-join except that we match on nearest key rather than equal keys. Both DataFrames must be sorted by the key. For each row in the left DataFrame: - A "backward" search selects the last row in the right DataFrame whose 'on' key is less than or equal to the left's key. - A "forward" search selects the first row in the right DataFrame whose 'on' key is greater than or equal to the left's key. - A "nearest" search selects the row in the right DataFrame whose 'on' key is closest in absolute distance to the left's key. The default is "backward" and is compatible in versions below 0.20.0. The direction parameter was added in version 0.20.0 and introduces "forward" and "nearest". Optionally match on equivalent keys with 'by' before searching with 'on'. .. versionadded:: 0.19.0 Parameters ---------- left : DataFrame right : DataFrame on : label Field name to join on. Must be found in both DataFrames. The data MUST be ordered. Furthermore this must be a numeric column, such as datetimelike, integer, or float. On or left_on/right_on must be given. left_on : label Field name to join on in left DataFrame. right_on : label Field name to join on in right DataFrame. left_index : boolean Use the index of the left DataFrame as the join key. .. versionadded:: 0.19.2 right_index : boolean Use the index of the right DataFrame as the join key. .. versionadded:: 0.19.2 by : column name or list of column names Match on these columns before performing merge operation. left_by : column name Field names to match on in the left DataFrame. .. versionadded:: 0.19.2 right_by : column name Field names to match on in the right DataFrame. .. versionadded:: 0.19.2 suffixes : 2-length sequence (tuple, list, ...) Suffix to apply to overlapping column names in the left and right side, respectively. tolerance : integer or Timedelta, optional, default None Select asof tolerance within this range; must be compatible with the merge index. allow_exact_matches : boolean, default True - If True, allow matching with the same 'on' value (i.e. less-than-or-equal-to / greater-than-or-equal-to) - If False, don't match the same 'on' value (i.e., strictly less-than / strictly greater-than) direction : 'backward' (default), 'forward', or 'nearest' Whether to search for prior, subsequent, or closest matches. .. versionadded:: 0.20.0 Returns ------- merged : DataFrame See Also -------- merge merge_ordered Examples -------- >>> left = pd.DataFrame({'a': [1, 5, 10], 'left_val': ['a', 'b', 'c']}) >>> left a left_val 0 1 a 1 5 b 2 10 c >>> right = pd.DataFrame({'a': [1, 2, 3, 6, 7], ... 'right_val': [1, 2, 3, 6, 7]}) >>> right a right_val 0 1 1 1 2 2 2 3 3 3 6 6 4 7 7 >>> pd.merge_asof(left, right, on='a') a left_val right_val 0 1 a 1 1 5 b 3 2 10 c 7 >>> pd.merge_asof(left, right, on='a', allow_exact_matches=False) a left_val right_val 0 1 a NaN 1 5 b 3.0 2 10 c 7.0 >>> pd.merge_asof(left, right, on='a', direction='forward') a left_val right_val 0 1 a 1.0 1 5 b 6.0 2 10 c NaN >>> pd.merge_asof(left, right, on='a', direction='nearest') a left_val right_val 0 1 a 1 1 5 b 6 2 10 c 7 We can use indexed DataFrames as well. 
>>> left = pd.DataFrame({'left_val': ['a', 'b', 'c']}, index=[1, 5, 10]) >>> left left_val 1 a 5 b 10 c >>> right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7]}, ... index=[1, 2, 3, 6, 7]) >>> right right_val 1 1 2 2 3 3 6 6 7 7 >>> pd.merge_asof(left, right, left_index=True, right_index=True) left_val right_val 1 a 1 5 b 3 10 c 7 Here is a real-world times-series example >>> quotes time ticker bid ask 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03 >>> trades time ticker price quantity 0 2016-05-25 13:30:00.023 MSFT 51.95 75 1 2016-05-25 13:30:00.038 MSFT 51.95 155 2 2016-05-25 13:30:00.048 GOOG 720.77 100 3 2016-05-25 13:30:00.048 GOOG 720.92 100 4 2016-05-25 13:30:00.048 AAPL 98.00 100 By default we are taking the asof of the quotes >>> pd.merge_asof(trades, quotes, ... on='time', ... by='ticker') time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 2ms between the quote time and the trade time >>> pd.merge_asof(trades, quotes, ... on='time', ... by='ticker', ... tolerance=pd.Timedelta('2ms')) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN 2 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93 3 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN We only asof within 10ms between the quote time and the trade time and we exclude exact matches on time. However *prior* data will propagate forward >>> pd.merge_asof(trades, quotes, ... on='time', ... by='ticker', ... tolerance=pd.Timedelta('10ms'), ... allow_exact_matches=False) time ticker price quantity bid ask 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98 2 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN 3 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN 4 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN """
op = _AsOfMerge(left, right, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, by=by, left_by=left_by, right_by=right_by, suffixes=suffixes, how='asof', tolerance=tolerance, allow_exact_matches=allow_exact_matches, direction=direction) return op.get_result()
<SYSTEM_TASK:> Restore index levels specified as `on` parameters <END_TASK> <USER_TASK:> Description: def _maybe_restore_index_levels(self, result): """ Restore index levels specified as `on` parameters Here we check for cases where `self.left_on` and `self.right_on` pairs each reference an index level in their respective DataFrames. The joined columns corresponding to these pairs are then restored to the index of `result`. **Note:** This method has side effects. It modifies `result` in-place Parameters ---------- result: DataFrame merge result Returns ------- None """
names_to_restore = [] for name, left_key, right_key in zip(self.join_names, self.left_on, self.right_on): if (self.orig_left._is_level_reference(left_key) and self.orig_right._is_level_reference(right_key) and name not in result.index.names): names_to_restore.append(name) if names_to_restore: result.set_index(names_to_restore, inplace=True)
<SYSTEM_TASK:> Create a join index by rearranging one index to match another <END_TASK> <USER_TASK:> Description: def _create_join_index(self, index, other_index, indexer, other_indexer, how='left'): """ Create a join index by rearranging one index to match another Parameters ---------- index: Index being rearranged other_index: Index used to supply values not found in index indexer: how to rearrange index other_indexer: how to rearrange other_index when filling values missing from index how: replacement is only necessary if the indexer is based on other_index Returns ------- join_index """
join_index = index.take(indexer) if (self.how in (how, 'outer') and not isinstance(other_index, MultiIndex)): # if final index requires values in other_index but not target # index, indexer may hold missing (-1) values, causing Index.take # to take the final value in target index mask = indexer == -1 if np.any(mask): # if values missing (-1) from target index, # take from other_index instead join_list = join_index.to_numpy() other_list = other_index.take(other_indexer).to_numpy() join_list[mask] = other_list[mask] join_index = Index(join_list, dtype=join_index.dtype, name=join_index.name) return join_index
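The masking above exists because `Index.take` interprets -1 as "last element" rather than "missing" by default; a minimal demonstration:

import pandas as pd

idx = pd.Index([10, 20, 30])
print(idx.take([0, 2, -1]))  # [10, 30, 30] -- the -1 wraps to the last value, not a missing marker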
<SYSTEM_TASK:> Check if we match 'dtype'. <END_TASK> <USER_TASK:> Description: def is_dtype(cls, dtype): """Check if we match 'dtype'. Parameters ---------- dtype : object The object to check. Returns ------- is_dtype : bool Notes ----- The default implementation is True if 1. ``cls.construct_from_string(dtype)`` is an instance of ``cls``. 2. ``dtype`` is an object and is an instance of ``cls`` 3. ``dtype`` has a ``dtype`` attribute, and any of the above conditions is true for ``dtype.dtype``. """
dtype = getattr(dtype, 'dtype', dtype) if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)): # https://github.com/pandas-dev/pandas/issues/22960 # avoid passing data to `construct_from_string`. This could # cause a FutureWarning from numpy about failing elementwise # comparison from, e.g., comparing DataFrame == 'category'. return False elif dtype is None: return False elif isinstance(dtype, cls): return True try: return cls.construct_from_string(dtype) is not None except TypeError: return False
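A minimal sketch (class and dtype names are hypothetical) of a third-party ExtensionDtype that leans on this default `is_dtype`: condition 1 is satisfied whenever `construct_from_string` succeeds.

from pandas.api.extensions import ExtensionDtype

class MyDtype(ExtensionDtype):
    name = 'my_dtype'
    type = object

    @classmethod
    def construct_from_string(cls, string):
        if string == cls.name:
            return cls()
        raise TypeError("Cannot construct a 'MyDtype' from '{}'".format(string))

print(MyDtype.is_dtype('my_dtype'))  # True, via construct_from_string
print(MyDtype.is_dtype('int64'))     # False, construct_from_string raises TypeError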
<SYSTEM_TASK:> Test if pattern or regex is contained within a string of a Series or Index. <END_TASK> <USER_TASK:> Description: def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): """ Test if pattern or regex is contained within a string of a Series or Index. Return boolean Series or Index based on whether a given pattern or regex is contained within a string of a Series or Index. Parameters ---------- pat : str Character sequence or regular expression. case : bool, default True If True, case sensitive. flags : int, default 0 (no flags) Flags to pass through to the re module, e.g. re.IGNORECASE. na : default NaN Fill value for missing values. regex : bool, default True If True, assumes the pat is a regular expression. If False, treats the pat as a literal string. Returns ------- Series or Index of boolean values A Series or Index of boolean values indicating whether the given pattern is contained within the string of each element of the Series or Index. See Also -------- match : Analogous, but stricter, relying on re.match instead of re.search. Series.str.startswith : Test if the start of each string element matches a pattern. Series.str.endswith : Same as startswith, but tests the end of string. Examples -------- Returning a Series of booleans using only a literal pattern. >>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN]) >>> s1.str.contains('og', regex=False) 0 False 1 True 2 False 3 False 4 NaN dtype: object Returning an Index of booleans using only a literal pattern. >>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN]) >>> ind.str.contains('23', regex=False) Index([False, False, False, True, nan], dtype='object') Specifying case sensitivity using `case`. >>> s1.str.contains('oG', case=True, regex=True) 0 False 1 False 2 False 3 False 4 NaN dtype: object Specifying `na` to be `False` instead of `NaN` replaces NaN values with `False`. If Series or Index does not contain NaN values the resultant dtype will be `bool`, otherwise, an `object` dtype. >>> s1.str.contains('og', na=False, regex=True) 0 False 1 True 2 False 3 False 4 False dtype: bool Returning 'house' or 'dog' when either expression occurs in a string. >>> s1.str.contains('house|dog', regex=True) 0 False 1 True 2 True 3 False 4 NaN dtype: object Ignoring case sensitivity using `flags` with regex. >>> import re >>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True) 0 False 1 False 2 True 3 False 4 NaN dtype: object Returning any digit using regular expression. >>> s1.str.contains('\\d', regex=True) 0 False 1 False 2 False 3 True 4 NaN dtype: object Ensure `pat` is a not a literal pattern when `regex` is set to True. Note in the following example one might expect only `s2[1]` and `s2[3]` to return `True`. However, '.0' as a regex matches any character followed by a 0. >>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35']) >>> s2.str.contains('.0', regex=True) 0 True 1 True 2 False 3 True 4 False dtype: bool """
if regex: if not case: flags |= re.IGNORECASE regex = re.compile(pat, flags=flags) if regex.groups > 0: warnings.warn("This pattern has match groups. To actually get the" " groups, use str.extract.", UserWarning, stacklevel=3) f = lambda x: bool(regex.search(x)) else: if case: f = lambda x: pat in x else: upper_pat = pat.upper() f = lambda x: upper_pat in x uppered = _na_map(lambda x: x.upper(), arr) return _na_map(f, uppered, na, dtype=bool) return _na_map(f, arr, na, dtype=bool)
<SYSTEM_TASK:> Test if the start of each string element matches a pattern. <END_TASK> <USER_TASK:> Description: def str_startswith(arr, pat, na=np.nan): """ Test if the start of each string element matches a pattern. Equivalent to :meth:`str.startswith`. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. na : object, default NaN Object shown if element tested is not a string. Returns ------- Series or Index of bool A Series of booleans indicating whether the given pattern matches the start of each string element. See Also -------- str.startswith : Python standard library string method. Series.str.endswith : Same as startswith, but tests the end of string. Series.str.contains : Tests if string element contains a pattern. Examples -------- >>> s = pd.Series(['bat', 'Bear', 'cat', np.nan]) >>> s 0 bat 1 Bear 2 cat 3 NaN dtype: object >>> s.str.startswith('b') 0 True 1 False 2 False 3 NaN dtype: object Specifying `na` to be `False` instead of `NaN`. >>> s.str.startswith('b', na=False) 0 True 1 False 2 False 3 False dtype: bool """
f = lambda x: x.startswith(pat) return _na_map(f, arr, na, dtype=bool)
<SYSTEM_TASK:> Test if the end of each string element matches a pattern. <END_TASK> <USER_TASK:> Description: def str_endswith(arr, pat, na=np.nan): """ Test if the end of each string element matches a pattern. Equivalent to :meth:`str.endswith`. Parameters ---------- pat : str Character sequence. Regular expressions are not accepted. na : object, default NaN Object shown if element tested is not a string. Returns ------- Series or Index of bool A Series of booleans indicating whether the given pattern matches the end of each string element. See Also -------- str.endswith : Python standard library string method. Series.str.startswith : Same as endswith, but tests the start of string. Series.str.contains : Tests if string element contains a pattern. Examples -------- >>> s = pd.Series(['bat', 'bear', 'caT', np.nan]) >>> s 0 bat 1 bear 2 caT 3 NaN dtype: object >>> s.str.endswith('t') 0 True 1 False 2 False 3 NaN dtype: object Specifying `na` to be `False` instead of `NaN`. >>> s.str.endswith('t', na=False) 0 True 1 False 2 False 3 False dtype: bool """
f = lambda x: x.endswith(pat) return _na_map(f, arr, na, dtype=bool)
<SYSTEM_TASK:> Duplicate each string in the Series or Index. <END_TASK> <USER_TASK:> Description: def str_repeat(arr, repeats): """ Duplicate each string in the Series or Index. Parameters ---------- repeats : int or sequence of int Same value for all (int) or different value per (sequence). Returns ------- Series or Index of object Series or Index of repeated string objects specified by input parameter repeats. Examples -------- >>> s = pd.Series(['a', 'b', 'c']) >>> s 0 a 1 b 2 c dtype: object Single int repeats string in Series >>> s.str.repeat(repeats=2) 0 aa 1 bb 2 cc dtype: object Sequence of int repeats corresponding string in Series >>> s.str.repeat(repeats=[1, 2, 3]) 0 a 1 bb 2 ccc dtype: object """
if is_scalar(repeats): def scalar_rep(x): try: return bytes.__mul__(x, repeats) except TypeError: return str.__mul__(x, repeats) return _na_map(scalar_rep, arr) else: def rep(x, r): try: return bytes.__mul__(x, r) except TypeError: return str.__mul__(x, r) repeats = np.asarray(repeats, dtype=object) result = libops.vec_binop(com.values_from_object(arr), repeats, rep) return result
<SYSTEM_TASK:> Determine if each string matches a regular expression. <END_TASK> <USER_TASK:> Description: def str_match(arr, pat, case=True, flags=0, na=np.nan): """ Determine if each string matches a regular expression. Parameters ---------- pat : str Character sequence or regular expression. case : bool, default True If True, case sensitive. flags : int, default 0 (no flags) re module flags, e.g. re.IGNORECASE. na : default NaN Fill value for missing values. Returns ------- Series/array of boolean values See Also -------- contains : Analogous, but less strict, relying on re.search instead of re.match. extract : Extract matched groups. """
if not case: flags |= re.IGNORECASE regex = re.compile(pat, flags=flags) dtype = bool f = lambda x: bool(regex.match(x)) return _na_map(f, arr, na, dtype=dtype)
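The docstring above has no Examples section, so here is a short one contrasting `match` (anchored via `re.match`) with `contains` (unanchored via `re.search`):

import pandas as pd

s = pd.Series(['cat', 'scatter', 'dog'])
print(s.str.match('cat'))     # True, False, False -- pattern must match at the start
print(s.str.contains('cat'))  # True, True,  False -- pattern may match anywhere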
<SYSTEM_TASK:> Used in both extract_noexpand and extract_frame <END_TASK> <USER_TASK:> Description: def _groups_or_na_fun(regex): """Used in both extract_noexpand and extract_frame"""
if regex.groups == 0: raise ValueError("pattern contains no capture groups") empty_row = [np.nan] * regex.groups def f(x): if not isinstance(x, str): return empty_row m = regex.search(x) if m: return [np.nan if item is None else item for item in m.groups()] else: return empty_row return f
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def str_extract(arr, pat, flags=0, expand=True): r""" Extract capture groups in the regex `pat` as columns in a DataFrame. For each subject string in the Series, extract groups from the first match of regular expression `pat`. Parameters ---------- pat : str Regular expression pattern with capturing groups. flags : int, default 0 (no flags) Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that modify regular expression matching for things like case, spaces, etc. For more details, see :mod:`re`. expand : bool, default True If True, return DataFrame with one column per capture group. If False, return a Series/Index if there is one capture group or DataFrame if there are multiple capture groups. .. versionadded:: 0.18.0 Returns ------- DataFrame or Series or Index A DataFrame with one row for each subject string, and one column for each group. Any capture group names in regular expression pat will be used for column names; otherwise capture group numbers will be used. The dtype of each result column is always object, even when no match is found. If ``expand=False`` and pat has only one capture group, then return a Series (if subject is a Series) or Index (if subject is an Index). See Also -------- extractall : Returns all matches (not just the first match). Examples -------- A pattern with two groups will return a DataFrame with two columns. Non-matches will be NaN. >>> s = pd.Series(['a1', 'b2', 'c3']) >>> s.str.extract(r'([ab])(\d)') 0 1 0 a 1 1 b 2 2 NaN NaN A pattern may contain optional groups. >>> s.str.extract(r'([ab])?(\d)') 0 1 0 a 1 1 b 2 2 NaN 3 Named groups will become column names in the result. >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)') letter digit 0 a 1 1 b 2 2 NaN NaN A pattern with one group will return a DataFrame with one column if expand=True. >>> s.str.extract(r'[ab](\d)', expand=True) 0 0 1 1 2 2 NaN A pattern with one group will return a Series if expand=False. >>> s.str.extract(r'[ab](\d)', expand=False) 0 1 1 2 2 NaN dtype: object """
if not isinstance(expand, bool): raise ValueError("expand must be True or False") if expand: return _str_extract_frame(arr._orig, pat, flags=flags) else: result, name = _str_extract_noexpand(arr._parent, pat, flags=flags) return arr._wrap_result(result, name=name, expand=expand)
<SYSTEM_TASK:> Slice substrings from each element in the Series or Index. <END_TASK> <USER_TASK:> Description: def str_slice(arr, start=None, stop=None, step=None): """ Slice substrings from each element in the Series or Index. Parameters ---------- start : int, optional Start position for slice operation. stop : int, optional Stop position for slice operation. step : int, optional Step size for slice operation. Returns ------- Series or Index of object Series or Index from sliced substring from original string object. See Also -------- Series.str.slice_replace : Replace a slice with a string. Series.str.get : Return element at position. Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i` being the position. Examples -------- >>> s = pd.Series(["koala", "fox", "chameleon"]) >>> s 0 koala 1 fox 2 chameleon dtype: object >>> s.str.slice(start=1) 0 oala 1 ox 2 hameleon dtype: object >>> s.str.slice(stop=2) 0 ko 1 fo 2 ch dtype: object >>> s.str.slice(step=2) 0 kaa 1 fx 2 caeen dtype: object >>> s.str.slice(start=0, stop=5, step=3) 0 kl 1 f 2 cm dtype: object Equivalent behaviour to: >>> s.str[0:5:3] 0 kl 1 f 2 cm dtype: object """
obj = slice(start, stop, step) f = lambda x: x[obj] return _na_map(f, arr)
<SYSTEM_TASK:> Replace a positional slice of a string with another value. <END_TASK> <USER_TASK:> Description: def str_slice_replace(arr, start=None, stop=None, repl=None): """ Replace a positional slice of a string with another value. Parameters ---------- start : int, optional Left index position to use for the slice. If not specified (None), the slice is unbounded on the left, i.e. slice from the start of the string. stop : int, optional Right index position to use for the slice. If not specified (None), the slice is unbounded on the right, i.e. slice until the end of the string. repl : str, optional String for replacement. If not specified (None), the sliced region is replaced with an empty string. Returns ------- Series or Index Same type as the original object. See Also -------- Series.str.slice : Just slicing without replacement. Examples -------- >>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde']) >>> s 0 a 1 ab 2 abc 3 abdc 4 abcde dtype: object Specify just `start`, meaning replace `start` until the end of the string with `repl`. >>> s.str.slice_replace(1, repl='X') 0 aX 1 aX 2 aX 3 aX 4 aX dtype: object Specify just `stop`, meaning the start of the string to `stop` is replaced with `repl`, and the rest of the string is included. >>> s.str.slice_replace(stop=2, repl='X') 0 X 1 X 2 Xc 3 Xdc 4 Xcde dtype: object Specify `start` and `stop`, meaning the slice from `start` to `stop` is replaced with `repl`. Everything before or after `start` and `stop` is included as is. >>> s.str.slice_replace(start=1, stop=3, repl='X') 0 aX 1 aX 2 aX 3 aXc 4 aXde dtype: object """
if repl is None: repl = '' def f(x): if x[start:stop] == '': local_stop = start else: local_stop = stop y = '' if start is not None: y += x[:start] y += repl if stop is not None: y += x[local_stop:] return y return _na_map(f, arr)
<SYSTEM_TASK:> Extract element from each component at specified position. <END_TASK> <USER_TASK:> Description: def str_get(arr, i): """ Extract element from each component at specified position. Extract element from lists, tuples, or strings in each element in the Series/Index. Parameters ---------- i : int Position of element to extract. Returns ------- Series or Index Examples -------- >>> s = pd.Series(["String", ... (1, 2, 3), ... ["a", "b", "c"], ... 123, ... -456, ... {1: "Hello", "2": "World"}]) >>> s 0 String 1 (1, 2, 3) 2 [a, b, c] 3 123 4 -456 5 {1: 'Hello', '2': 'World'} dtype: object >>> s.str.get(1) 0 t 1 2 2 b 3 NaN 4 NaN 5 Hello dtype: object >>> s.str.get(-1) 0 g 1 3 2 c 3 NaN 4 NaN 5 None dtype: object """
def f(x): if isinstance(x, dict): return x.get(i) elif len(x) > i >= -len(x): return x[i] return np.nan return _na_map(f, arr)
<SYSTEM_TASK:> Convert bytes and non-string into Python 3 str <END_TASK> <USER_TASK:> Description: def to_str(s): """ Convert bytes and non-string into Python 3 str """
if isinstance(s, bytes): s = s.decode('utf-8') elif not isinstance(s, str): s = str(s) return s
<SYSTEM_TASK:> construct and return a row or column based frame apply object <END_TASK> <USER_TASK:> Description: def frame_apply(obj, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, ignore_failures=False, args=None, kwds=None): """ construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis) if axis == 0: klass = FrameRowApply elif axis == 1: klass = FrameColumnApply return klass(obj, func, broadcast=broadcast, raw=raw, reduce=reduce, result_type=result_type, ignore_failures=ignore_failures, args=args, kwds=kwds)
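Through the public API, the axis argument decides which of the two classes above handles the call; a minimal illustration:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
print(df.apply(sum, axis=0))  # handled by FrameRowApply: one result per column
print(df.apply(sum, axis=1))  # handled by FrameColumnApply: one result per row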
<SYSTEM_TASK:> compute the results <END_TASK> <USER_TASK:> Description: def get_result(self): """ compute the results """
# dispatch to agg if is_list_like(self.f) or is_dict_like(self.f): return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds) # all empty if len(self.columns) == 0 and len(self.index) == 0: return self.apply_empty_result() # string dispatch if isinstance(self.f, str): # Support for `frame.transform('method')` # Some methods (shift, etc.) require the axis argument, others # don't, so inspect and insert if necessary. func = getattr(self.obj, self.f) sig = inspect.getfullargspec(func) if 'axis' in sig.args: self.kwds['axis'] = self.axis return func(*self.args, **self.kwds) # ufunc elif isinstance(self.f, np.ufunc): with np.errstate(all='ignore'): results = self.obj._data.apply('apply', func=self.f) return self.obj._constructor(data=results, index=self.index, columns=self.columns, copy=False) # broadcasting if self.result_type == 'broadcast': return self.apply_broadcast() # one axis empty elif not all(self.obj.shape): return self.apply_empty_result() # raw elif self.raw and not self.obj._is_mixed_type: return self.apply_raw() return self.apply_standard()
<SYSTEM_TASK:> we have an empty result; at least 1 axis is 0 <END_TASK> <USER_TASK:> Description: def apply_empty_result(self): """ we have an empty result; at least 1 axis is 0 we will try to apply the function to an empty series in order to see if this is a reduction function """
# we are not asked to reduce or infer reduction # so just return a copy of the existing object if self.result_type not in ['reduce', None]: return self.obj.copy() # we may need to infer reduce = self.result_type == 'reduce' from pandas import Series if not reduce: EMPTY_SERIES = Series([]) try: r = self.f(EMPTY_SERIES, *self.args, **self.kwds) reduce = not isinstance(r, Series) except Exception: pass if reduce: return self.obj._constructor_sliced(np.nan, index=self.agg_axis) else: return self.obj.copy()
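The inference step above can be observed from the public API (with the pandas version this code comes from): applying a reducing function to an empty frame yields a Series of NaN, while a non-reducing function returns an empty copy.

import pandas as pd

empty = pd.DataFrame(columns=['a', 'b'])
print(empty.apply(lambda col: col.sum()))  # Series of NaN -- reduction was inferred
print(empty.apply(lambda col: col * 2))    # empty DataFrame -- not a reduction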
<SYSTEM_TASK:> apply to the values as a numpy array <END_TASK> <USER_TASK:> Description: def apply_raw(self): """ apply to the values as a numpy array """
try: result = reduction.reduce(self.values, self.f, axis=self.axis) except Exception: result = np.apply_along_axis(self.f, self.axis, self.values) # TODO: mixed type case if result.ndim == 2: return self.obj._constructor(result, index=self.index, columns=self.columns) else: return self.obj._constructor_sliced(result, index=self.agg_axis)
<SYSTEM_TASK:> return the results for the rows <END_TASK> <USER_TASK:> Description: def wrap_results_for_axis(self): """ return the results for the rows """
results = self.results result = self.obj._constructor(data=results) if not isinstance(results[0], ABCSeries): try: result.index = self.res_columns except ValueError: pass try: result.columns = self.res_index except ValueError: pass return result
<SYSTEM_TASK:> return the results for the columns <END_TASK> <USER_TASK:> Description: def wrap_results_for_axis(self): """ return the results for the columns """
results = self.results # we have requested to expand if self.result_type == 'expand': result = self.infer_to_same_shape() # we have a non-series and don't want inference elif not isinstance(results[0], ABCSeries): from pandas import Series result = Series(results) result.index = self.res_index # we may want to infer results else: result = self.infer_to_same_shape() return result
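The 'expand' branch above corresponds to `result_type='expand'` in `DataFrame.apply`; a small example of the shape inference it drives:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
# each row maps to a list; 'expand' turns the list elements into columns of the result
print(df.apply(lambda row: [row['a'], row['b'] * 10], axis=1, result_type='expand'))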
<SYSTEM_TASK:> infer the results to the same shape as the input object <END_TASK> <USER_TASK:> Description: def infer_to_same_shape(self): """ infer the results to the same shape as the input object """
results = self.results result = self.obj._constructor(data=results) result = result.T # set the index result.index = self.res_index # infer dtypes result = result.infer_objects() return result
<SYSTEM_TASK:> Method returns a RNN_Learner object, that wraps an instance of the RNN_Encoder module. <END_TASK> <USER_TASK:> Description: def get_model(self, opt_fn, emb_sz, n_hid, n_layers, **kwargs): """ Method returns a RNN_Learner object, that wraps an instance of the RNN_Encoder module. Args: opt_fn (Optimizer): the torch optimizer function to use emb_sz (int): embedding size n_hid (int): number of hidden inputs n_layers (int): number of hidden layers kwargs: other arguments Returns: An instance of the RNN_Learner class. """
m = get_language_model(self.nt, emb_sz, n_hid, n_layers, self.pad_idx, **kwargs) model = SingleModel(to_gpu(m)) return RNN_Learner(self, model, opt_fn=opt_fn)
<SYSTEM_TASK:> Method used to instantiate a LanguageModelData object that can be used for a <END_TASK> <USER_TASK:> Description: def from_text_files(cls, path, field, train, validation, test=None, bs=64, bptt=70, **kwargs): """ Method used to instantiate a LanguageModelData object that can be used for a supported nlp task. Args: path (str): the absolute path in which temporary model data will be saved field (Field): torchtext field train (str): file location of the training data validation (str): file location of the validation data test (str): file location of the testing data bs (int): batch size to use bptt (int): back propagation through time hyper-parameter kwargs: other arguments Returns: a LanguageModelData instance, which most importantly, provides us the datasets for training, validation, and testing Note: The train, validation, and test path can be pointed to any file (or folder) that contains a valid text corpus. """
trn_ds, val_ds, test_ds = ConcatTextDataset.splits( path, text_field=field, train=train, validation=validation, test=test) return cls(path, field, trn_ds, val_ds, test_ds, bs, bptt, **kwargs)
<SYSTEM_TASK:> Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`. <END_TASK> <USER_TASK:> Description: def from_folder(cls, path:PathOrStr, extensions:Collection[str]=None, recurse:bool=True, include:Optional[Collection[str]]=None, processor:PreProcessors=None, **kwargs)->'ItemList': """Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`. `recurse` determines if we search subfolders."""
path = Path(path) return cls(get_files(path, extensions, recurse=recurse, include=include), path=path, processor=processor, **kwargs)
<SYSTEM_TASK:> Build Google Images Search Url params and return them as a string. <END_TASK> <USER_TASK:> Description: def _url_params(size:str='>400*300', format:str='jpg') -> str: "Build Google Images Search Url params and return them as a string." _fmts = {'jpg':'ift:jpg','gif':'ift:gif','png':'ift:png','bmp':'ift:bmp', 'svg':'ift:svg','webp':'webp','ico':'ift:ico'} if size not in _img_sizes: raise RuntimeError(f"""Unexpected size argument value: {size}. See `widgets.image_downloader._img_sizes` for supported sizes."""
) if format not in _fmts: raise RuntimeError(f"Unexpected image file format: {format}. Use jpg, gif, png, bmp, svg, webp, or ico.") return "&tbs=" + _img_sizes[size] + "," + _fmts[format]
<SYSTEM_TASK:> Downloads images in `img_tuples` to `label_path`. <END_TASK> <USER_TASK:> Description: def _download_images(label_path:PathOrStr, img_tuples:list, max_workers:int=defaults.cpus, timeout:int=4) -> FilePathList: """ Downloads images in `img_tuples` to `label_path`. If the directory doesn't exist, it'll be created automatically. Uses `parallel` to speed things up with up to `max_workers` workers when the system has enough CPU cores. If something doesn't work, try setting `max_workers=0` to debug. """
os.makedirs(Path(label_path), exist_ok=True) parallel( partial(_download_single_image, label_path, timeout=timeout), img_tuples, max_workers=max_workers) return get_image_files(label_path)
<SYSTEM_TASK:> for each string defined in self.weights, the corresponding <END_TASK> <USER_TASK:> Description: def _setup(self): """ for each string defined in self.weights, the corresponding attribute in the wrapped module is referenced, then deleted, and subsequently registered as a new parameter with a slightly modified name. Args: None Returns: None """
if isinstance(self.module, torch.nn.RNNBase): self.module.flatten_parameters = noop for name_w in self.weights: w = getattr(self.module, name_w) del self.module._parameters[name_w] self.module.register_parameter(name_w + '_raw', nn.Parameter(w.data))
<SYSTEM_TASK:> Uses pytorch's built-in dropout function to apply dropout to the parameters of <END_TASK> <USER_TASK:> Description: def _setweights(self): """ Uses pytorch's built-in dropout function to apply dropout to the parameters of the wrapped module. Args: None Returns: None """
for name_w in self.weights: raw_w = getattr(self.module, name_w + '_raw') w = torch.nn.functional.dropout(raw_w, p=self.dropout, training=self.training) if hasattr(self.module, name_w): delattr(self.module, name_w) setattr(self.module, name_w, w)
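A stand-alone sketch of the core operation above, stripped of the module bookkeeping: functional dropout is applied to the retained `*_raw` tensor, and the result is the weight the RNN actually uses on that forward pass.

import torch
import torch.nn.functional as F

raw_w = torch.randn(4, 4, requires_grad=True)  # stands in for e.g. 'weight_hh_l0_raw'
w = F.dropout(raw_w, p=0.5, training=True)     # the dropped-out weight installed on the module
print(w)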
<SYSTEM_TASK:> Check the underlying data in the training set can be properly loaded. <END_TASK> <USER_TASK:> Description: def sanity_check(self): "Check the underlying data in the training set can be properly loaded." final_message = "You can deactivate this warning by passing `no_check=True`." if not hasattr(self.train_ds, 'items') or len(self.train_ds.items) == 0 or not hasattr(self.train_dl, 'batch_sampler'): return if len(self.train_dl) == 0: warn(f"""Your training dataloader is empty, you have only {len(self.train_dl.dataset)} items in your training set. Your batch size is {self.train_dl.batch_size}, you should lower it."""
) print(final_message) return idx = next(iter(self.train_dl.batch_sampler)) samples,fails = [],[] for i in idx: try: samples.append(self.train_dl.dataset[i]) except: fails.append(i) if len(fails) > 0: warn_msg = "There seems to be something wrong with your dataset, for example, in the first batch can't access" if len(fails) == len(idx): warn_msg += f" any element of self.train_ds.\nTried: {show_some(idx)}" else: warn_msg += f" these elements in self.train_ds: {show_some(fails)}" warn(warn_msg) print(final_message) return try: batch = self.collate_fn(samples) except: message = "It's not possible to collate samples of your dataset together in a batch." try: shapes = [[o[i].data.shape for o in samples] for i in range(2)] message += f'\nShapes of the inputs/targets:\n{shapes}' except: pass warn(message) print(final_message)
<SYSTEM_TASK:> Make report in form of two notebooks. <END_TASK> <USER_TASK:> Description: def make_report(self, outcome): """Make report in form of two notebooks. Use nbdime diff-web to present the difference between reference cells and test cells. """
failures = self.getreports('failed') if not failures: return for rep in failures: # Check if this is a notebook node msg = self._getfailureheadline(rep) lines = rep.longrepr.splitlines() if len(lines) > 1: self.section(msg, lines[1]) self._outrep_summary(rep) tmpdir = tempfile.mkdtemp() try: ref_file = os.path.join(tmpdir, 'reference.ipynb') test_file = os.path.join(tmpdir, 'test_result.ipynb') with io.open(ref_file, "w", encoding="utf8") as f: nbformat.write(self.nb_ref, f) with io.open(test_file, "w", encoding="utf8") as f: nbformat.write(self.nb_test, f) run_server( port=0, # Run on random port cwd=tmpdir, closable=True, on_port=lambda port: browse( port, ref_file, test_file, None)) finally: shutil.rmtree(tmpdir)
<SYSTEM_TASK:> Creates a fp32 copy of model parameters and sets optimizer parameters <END_TASK> <USER_TASK:> Description: def copy_model_to_fp32(m, optim): """ Creates a fp32 copy of model parameters and sets optimizer parameters """
fp32_params = [m_param.clone().type(torch.cuda.FloatTensor).detach() for m_param in trainable_params_(m)] optim_groups = [group['params'] for group in optim.param_groups] iter_fp32_params = iter(fp32_params) for group_params in optim_groups: for i in range(len(group_params)): if not group_params[i].requires_grad: continue # only update trainable_params_ fp32_param = next(iter_fp32_params) assert(fp32_param.shape == group_params[i].shape) fp32_param.requires_grad = group_params[i].requires_grad group_params[i] = fp32_param return fp32_params
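A minimal CPU-only sketch (hypothetical tensors, not the fastai code path) of the master-weights idea implemented above: keep an fp32 copy for the optimizer to step on while the model itself holds fp16 parameters.

import torch

fp16_param = torch.nn.Parameter(torch.randn(3).half())             # model weight in fp16
master = fp16_param.detach().clone().float().requires_grad_(True)  # fp32 copy the optimizer steps on
# after optimizer.step() on `master`, the fp16 weight is refreshed, e.g.
# fp16_param.data.copy_(master.data)
print(master.dtype, fp16_param.dtype)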