<SYSTEM_TASK:> Hash a Categorical by hashing its categories, and then mapping the codes <END_TASK> <USER_TASK:> Description: def _hash_categorical(c, encoding, hash_key): """ Hash a Categorical by hashing its categories, and then mapping the codes to the hashes Parameters ---------- c : Categorical encoding : string, default 'utf8' hash_key : string key to encode, default to _default_hash_key Returns ------- ndarray of hashed values array, same size as len(c) """
    # Convert ExtensionArrays to ndarrays
    values = np.asarray(c.categories.values)
    hashed = hash_array(values, encoding, hash_key, categorize=False)

    # we have uint64, as we don't directly support missing values
    # we don't want to use take_nd which will coerce to float
    # instead, directly construct the result with a
    # max(np.uint64) as the missing value indicator
    #
    # TODO: GH 15362
    mask = c.isna()
    if len(hashed):
        result = hashed.take(c.codes)
    else:
        result = np.zeros(len(mask), dtype='uint64')

    if mask.any():
        result[mask] = np.iinfo(np.uint64).max

    return result
<SYSTEM_TASK:> Given a 1d array, return an array of deterministic integers. <END_TASK> <USER_TASK:> Description: def hash_array(vals, encoding='utf8', hash_key=None, categorize=True): """ Given a 1d array, return an array of deterministic integers. .. versionadded:: 0.19.2 Parameters ---------- vals : ndarray, Categorical encoding : string, default 'utf8' encoding for data & key when strings hash_key : string key to encode, default to _default_hash_key categorize : bool, default True Whether to first categorize object arrays before hashing. This is more efficient when the array contains duplicate values. .. versionadded:: 0.20.0 Returns ------- 1d uint64 numpy array of hash values, same length as the vals """
    if not hasattr(vals, 'dtype'):
        raise TypeError("must pass a ndarray-like")
    dtype = vals.dtype

    if hash_key is None:
        hash_key = _default_hash_key

    # For categoricals, we hash the categories, then remap the codes to the
    # hash values. (This check is above the complex check so that we don't ask
    # numpy if categorical is a subdtype of complex, as it will choke).
    if is_categorical_dtype(dtype):
        return _hash_categorical(vals, encoding, hash_key)
    elif is_extension_array_dtype(dtype):
        vals, _ = vals._values_for_factorize()
        dtype = vals.dtype

    # we'll be working with everything as 64-bit values, so handle this
    # 128-bit value early
    if np.issubdtype(dtype, np.complex128):
        return hash_array(vals.real) + 23 * hash_array(vals.imag)

    # First, turn whatever array this is into unsigned 64-bit ints, if we can
    # manage it.
    elif isinstance(dtype, np.bool):
        vals = vals.astype('u8')
    elif issubclass(dtype.type, (np.datetime64, np.timedelta64)):
        vals = vals.view('i8').astype('u8', copy=False)
    elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8:
        vals = vals.view('u{}'.format(vals.dtype.itemsize)).astype('u8')
    else:
        # With repeated values, its MUCH faster to categorize object dtypes,
        # then hash and rename categories. We allow skipping the categorization
        # when the values are known/likely to be unique.
        if categorize:
            from pandas import factorize, Categorical, Index
            codes, categories = factorize(vals, sort=False)
            cat = Categorical(codes, Index(categories),
                              ordered=False, fastpath=True)
            return _hash_categorical(cat, encoding, hash_key)

        try:
            vals = hashing.hash_object_array(vals, hash_key, encoding)
        except TypeError:
            # we have mixed types
            vals = hashing.hash_object_array(vals.astype(str).astype(object),
                                             hash_key, encoding)

    # Then, redistribute these 64-bit ints within the space of 64-bit ints
    vals ^= vals >> 30
    vals *= np.uint64(0xbf58476d1ce4e5b9)
    vals ^= vals >> 27
    vals *= np.uint64(0x94d049bb133111eb)
    vals ^= vals >> 31
    return vals
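A short usage sketch (illustrative only; it assumes a pandas build where this helper is exposed as pandas.util.hash_array, as in 0.19.2+):

import numpy as np
import pandas as pd

# Hashing is deterministic: equal inputs always give equal uint64 hashes.
vals = np.array(['a', 'b', 'a'], dtype=object)
hashed = pd.util.hash_array(vals, categorize=True)
assert hashed.dtype == np.uint64
assert hashed[0] == hashed[2]    # identical values hash identically
assert len(hashed) == len(vals)  # one hash per input element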
<SYSTEM_TASK:> Execute a command as a OS terminal. <END_TASK> <USER_TASK:> Description: def _run_os(*args): """ Execute a command as a OS terminal. Parameters ---------- *args : list of str Command and parameters to be executed Examples -------- >>> DocBuilder()._run_os('python', '--version') """
    subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
<SYSTEM_TASK:> Open a browser tab showing single <END_TASK> <USER_TASK:> Description: def _open_browser(self, single_doc_html): """ Open a browser tab showing single """
    url = os.path.join('file://', DOC_PATH, 'build', 'html',
                       single_doc_html)
    webbrowser.open(url, new=2)
<SYSTEM_TASK:> Open the rst file `page` and extract its title. <END_TASK> <USER_TASK:> Description: def _get_page_title(self, page): """ Open the rst file `page` and extract its title. """
    fname = os.path.join(SOURCE_PATH, '{}.rst'.format(page))
    option_parser = docutils.frontend.OptionParser(
        components=(docutils.parsers.rst.Parser,))
    doc = docutils.utils.new_document(
        '<doc>',
        option_parser.get_default_values())
    with open(fname) as f:
        data = f.read()

    parser = docutils.parsers.rst.Parser()
    # do not generate any warning when parsing the rst
    with open(os.devnull, 'a') as f:
        doc.reporter.stream = f
        parser.parse(data, doc)

    section = next(node for node in doc.children
                   if isinstance(node, docutils.nodes.section))
    title = next(node for node in section.children
                 if isinstance(node, docutils.nodes.title))

    return title.astext()
<SYSTEM_TASK:> Clean documentation generated files. <END_TASK> <USER_TASK:> Description: def clean(): """ Clean documentation generated files. """
    shutil.rmtree(BUILD_PATH, ignore_errors=True)
    shutil.rmtree(os.path.join(SOURCE_PATH, 'reference', 'api'),
                  ignore_errors=True)
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def _format_multicolumn(self, row, ilevels): r""" Combine columns belonging to a group to a single multicolumn entry according to self.multicolumn_format e.g.: a & & & b & c & will become \multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c} """
    row2 = list(row[:ilevels])
    ncol = 1
    coltext = ''

    def append_col():
        # write multicolumn if needed
        if ncol > 1:
            row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
                        .format(ncol=ncol, fmt=self.multicolumn_format,
                                txt=coltext.strip()))
        # don't modify where not needed
        else:
            row2.append(coltext)

    for c in row[ilevels:]:
        # if next col has text, write the previous
        if c.strip():
            if coltext:
                append_col()
            coltext = c
            ncol = 1
        # if not, add it to the previous multicolumn
        else:
            ncol += 1

    # write last column name
    if coltext:
        append_col()

    return row2
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def _format_multirow(self, row, ilevels, i, rows): r""" Check following rows, whether row should be a multirow e.g.: becomes: a & 0 & \multirow{2}{*}{a} & 0 & & 1 & & 1 & b & 0 & \cline{1-2} b & 0 & """
    for j in range(ilevels):
        if row[j].strip():
            nrow = 1
            for r in rows[i + 1:]:
                if not r[j].strip():
                    nrow += 1
                else:
                    break
            if nrow > 1:
                # overwrite non-multirow entry
                row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
                    nrow=nrow, row=row[j].strip())
                # save when to end the current block with \cline
                self.clinebuf.append([i + nrow - 1, j + 1])
    return row
<SYSTEM_TASK:> Print clines after multirow-blocks are finished <END_TASK> <USER_TASK:> Description: def _print_cline(self, buf, i, icol): """ Print clines after multirow-blocks are finished """
    for cl in self.clinebuf:
        if cl[0] == i:
            buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
                      .format(cl=cl[1], icol=icol))
    # remove entries that have been written to buffer
    self.clinebuf = [x for x in self.clinebuf if x[0] != i]
<SYSTEM_TASK:> Checks whether the 'name' parameter for parsing is either <END_TASK> <USER_TASK:> Description: def _validate_integer(name, val, min_val=0): """ Checks whether the 'name' parameter for parsing is either an integer OR float that can SAFELY be cast to an integer without losing accuracy. Raises a ValueError if that is not the case. Parameters ---------- name : string Parameter name (used for error reporting) val : int or float The value to check min_val : int Minimum allowed value (val < min_val will result in a ValueError) """
msg = "'{name:s}' must be an integer >={min_val:d}".format(name=name, min_val=min_val) if val is not None: if is_float(val): if int(val) != val: raise ValueError(msg) val = int(val) elif not (is_integer(val) and val >= min_val): raise ValueError(msg) return val
<SYSTEM_TASK:> Check if the `names` parameter contains duplicates. <END_TASK> <USER_TASK:> Description: def _validate_names(names): """ Check if the `names` parameter contains duplicates. If duplicates are found, we issue a warning before returning. Parameters ---------- names : array-like or None An array containing a list of the names used for the output DataFrame. Returns ------- names : array-like or None The original `names` parameter. """
    if names is not None:
        if len(names) != len(set(names)):
            msg = ("Duplicate names specified. This "
                   "will raise an error in the future.")
            warnings.warn(msg, UserWarning, stacklevel=3)

    return names
<SYSTEM_TASK:> r""" <END_TASK> <USER_TASK:> Description: def read_fwf(filepath_or_buffer: FilePathOrBuffer, colspecs='infer', widths=None, infer_nrows=100, **kwds): r""" Read a table of fixed-width formatted lines into DataFrame. Also supports optionally iterating or breaking of the file into chunks. Additional help can be found in the `online docs for IO Tools <http://pandas.pydata.org/pandas-docs/stable/io.html>`_. Parameters ---------- filepath_or_buffer : str, path object, or file-like object Any valid string path is acceptable. The string could be a URL. Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is expected. A local file could be: file://localhost/path/to/table.csv. If you want to pass in a path object, pandas accepts either ``pathlib.Path`` or ``py._path.local.LocalPath``. By file-like object, we refer to objects with a ``read()`` method, such as a file handler (e.g. via builtin ``open`` function) or ``StringIO``. colspecs : list of tuple (int, int) or 'infer'. optional A list of tuples giving the extents of the fixed-width fields of each line as half-open intervals (i.e., [from, to[ ). String value 'infer' can be used to instruct the parser to try detecting the column specifications from the first 100 rows of the data which are not being skipped via skiprows (default='infer'). widths : list of int, optional A list of field widths which can be used instead of 'colspecs' if the intervals are contiguous. infer_nrows : int, default 100 The number of rows to consider when letting the parser determine the `colspecs`. .. versionadded:: 0.24.0 **kwds : optional Optional keyword arguments can be passed to ``TextFileReader``. Returns ------- DataFrame or TextParser A comma-separated values (csv) file is returned as two-dimensional data structure with labeled axes. See Also -------- to_csv : Write DataFrame to a comma-separated values (csv) file. read_csv : Read a comma-separated values (csv) file into DataFrame. Examples -------- >>> pd.read_fwf('data.csv') # doctest: +SKIP """
    # Check input arguments.
    if colspecs is None and widths is None:
        raise ValueError("Must specify either colspecs or widths")
    elif colspecs not in (None, 'infer') and widths is not None:
        raise ValueError("You must specify only one of 'widths' and "
                         "'colspecs'")

    # Compute 'colspecs' from 'widths', if specified.
    if widths is not None:
        colspecs, col = [], 0
        for w in widths:
            colspecs.append((col, col + w))
            col += w

    kwds['colspecs'] = colspecs
    kwds['infer_nrows'] = infer_nrows
    kwds['engine'] = 'python-fwf'
    return _read(filepath_or_buffer, kwds)
<SYSTEM_TASK:> Check whether or not the `columns` parameter <END_TASK> <USER_TASK:> Description: def _is_potential_multi_index(columns): """ Check whether or not the `columns` parameter could be converted into a MultiIndex. Parameters ---------- columns : array-like Object which may or may not be convertible into a MultiIndex Returns ------- boolean : Whether or not columns could become a MultiIndex """
    return (len(columns) and not isinstance(columns, MultiIndex) and
            all(isinstance(c, tuple) for c in columns))
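For illustration, assuming the helper above is in scope:

# Columns consisting entirely of tuples may become a MultiIndex;
# mixed or plain labels may not.
assert _is_potential_multi_index([('a', 1), ('a', 2)])
assert not _is_potential_multi_index(['a', ('b', 1)])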
<SYSTEM_TASK:> Check whether or not the 'usecols' parameter <END_TASK> <USER_TASK:> Description: def _evaluate_usecols(usecols, names): """ Check whether or not the 'usecols' parameter is a callable. If so, enumerates the 'names' parameter and returns a set of indices for each entry in 'names' that evaluates to True. If not a callable, returns 'usecols'. """
    if callable(usecols):
        return {i for i, name in enumerate(names) if usecols(name)}
    return usecols
<SYSTEM_TASK:> Validates that all usecols are present in a given <END_TASK> <USER_TASK:> Description: def _validate_usecols_names(usecols, names): """ Validates that all usecols are present in a given list of names. If not, raise a ValueError that shows what usecols are missing. Parameters ---------- usecols : iterable of usecols The columns to validate are present in names. names : iterable of names The column names to check against. Returns ------- usecols : iterable of usecols The `usecols` parameter if the validation succeeds. Raises ------ ValueError : Columns were missing. Error message will list them. """
    missing = [c for c in usecols if c not in names]
    if len(missing) > 0:
        raise ValueError(
            "Usecols do not match columns, "
            "columns expected but not found: {missing}".format(missing=missing)
        )

    return usecols
<SYSTEM_TASK:> Validate the 'usecols' parameter. <END_TASK> <USER_TASK:> Description: def _validate_usecols_arg(usecols): """ Validate the 'usecols' parameter. Checks whether or not the 'usecols' parameter contains all integers (column selection by index), strings (column by name) or is a callable. Raises a ValueError if that is not the case. Parameters ---------- usecols : list-like, callable, or None List of columns to use when parsing or a callable that can be used to filter a list of table columns. Returns ------- usecols_tuple : tuple A tuple of (verified_usecols, usecols_dtype). 'verified_usecols' is either a set if an array-like is passed in or 'usecols' if a callable or None is passed in. 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like is passed in or None if a callable or None is passed in. """
msg = ("'usecols' must either be list-like of all strings, all unicode, " "all integers or a callable.") if usecols is not None: if callable(usecols): return usecols, None if not is_list_like(usecols): # see gh-20529 # # Ensure it is iterable container but not string. raise ValueError(msg) usecols_dtype = lib.infer_dtype(usecols, skipna=False) if usecols_dtype not in ("empty", "integer", "string", "unicode"): raise ValueError(msg) usecols = set(usecols) return usecols, usecols_dtype return usecols, None
<SYSTEM_TASK:> Check whether or not the 'parse_dates' parameter <END_TASK> <USER_TASK:> Description: def _validate_parse_dates_arg(parse_dates): """ Check whether or not the 'parse_dates' parameter is a non-boolean scalar. Raises a ValueError if that is the case. """
msg = ("Only booleans, lists, and " "dictionaries are accepted " "for the 'parse_dates' parameter") if parse_dates is not None: if is_scalar(parse_dates): if not lib.is_bool(parse_dates): raise TypeError(msg) elif not isinstance(parse_dates, (list, dict)): raise TypeError(msg) return parse_dates
<SYSTEM_TASK:> return a stringified and numeric for these values <END_TASK> <USER_TASK:> Description: def _stringify_na_values(na_values): """ return a stringified and numeric for these values """
    result = []
    for x in na_values:
        result.append(str(x))
        result.append(x)
        try:
            v = float(x)

            # we are like 999 here
            if v == int(v):
                v = int(v)
                result.append("{value}.0".format(value=v))
                result.append(str(v))

            result.append(v)
        except (TypeError, ValueError, OverflowError):
            pass
        try:
            result.append(int(x))
        except (TypeError, ValueError, OverflowError):
            pass
    return set(result)
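Assuming the helper above is in scope, an integer-like NA marker is expanded to every representation a CSV field might take:

na = _stringify_na_values([999])
# the string, the int, the float and the "999.0" spelling are all treated as NA
assert {'999', '999.0', 999}.issubset(na)
assert 999.0 in na  # numeric equality: 999.0 == 999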
<SYSTEM_TASK:> Get the NaN values for a given column. <END_TASK> <USER_TASK:> Description: def _get_na_values(col, na_values, na_fvalues, keep_default_na): """ Get the NaN values for a given column. Parameters ---------- col : str The name of the column. na_values : array-like, dict The object listing the NaN values as strings. na_fvalues : array-like, dict The object listing the NaN values as floats. keep_default_na : bool If `na_values` is a dict, and the column is not mapped in the dictionary, whether to return the default NaN values or the empty set. Returns ------- nan_tuple : A length-two tuple composed of 1) na_values : the string NaN values for that column. 2) na_fvalues : the float NaN values for that column. """
    if isinstance(na_values, dict):
        if col in na_values:
            return na_values[col], na_fvalues[col]
        else:
            if keep_default_na:
                return _NA_VALUES, set()

            return set(), set()
    else:
        return na_values, na_fvalues
<SYSTEM_TASK:> extract and return the names, index_names, col_names <END_TASK> <USER_TASK:> Description: def _extract_multi_indexer_columns(self, header, index_names, col_names, passed_names=False): """ extract and return the names, index_names, col_names header is a list-of-lists returned from the parsers """
    if len(header) < 2:
        return header[0], index_names, col_names, passed_names

    # the names are the tuples of the header that are not the index cols
    # 0 is the name of the index, assuming index_col is a list of column
    # numbers
    ic = self.index_col
    if ic is None:
        ic = []

    if not isinstance(ic, (list, tuple, np.ndarray)):
        ic = [ic]
    sic = set(ic)

    # clean the index_names
    index_names = header.pop(-1)
    index_names, names, index_col = _clean_index_names(index_names,
                                                       self.index_col,
                                                       self.unnamed_cols)

    # extract the columns
    field_count = len(header[0])

    def extract(r):
        return tuple(r[i] for i in range(field_count) if i not in sic)

    columns = lzip(*[extract(r) for r in header])
    names = ic + columns

    # If we find unnamed columns all in a single
    # level, then our header was too long.
    for n in range(len(columns[0])):
        if all(compat.to_str(c[n]) in self.unnamed_cols for c in columns):
            raise ParserError(
                "Passed header=[{header}] are too many rows for this "
                "multi_index of columns"
                .format(header=','.join(str(x) for x in self.header))
            )

    # Clean the column names (if we have an index_col).
    if len(ic):
        col_names = [r[0] if (len(r[0]) and
                              r[0] not in self.unnamed_cols) else None
                     for r in header]
    else:
        col_names = [None] * len(header)

    passed_names = True

    return names, index_names, col_names, passed_names
<SYSTEM_TASK:> Infer types of values, possibly casting <END_TASK> <USER_TASK:> Description: def _infer_types(self, values, na_values, try_num_bool=True): """ Infer types of values, possibly casting Parameters ---------- values : ndarray na_values : set try_num_bool : bool, default try try to cast values to numeric (first preference) or boolean Returns: -------- converted : ndarray na_count : int """
    na_count = 0
    if issubclass(values.dtype.type, (np.number, np.bool_)):
        mask = algorithms.isin(values, list(na_values))
        na_count = mask.sum()
        if na_count > 0:
            if is_integer_dtype(values):
                values = values.astype(np.float64)
            np.putmask(values, mask, np.nan)
        return values, na_count

    if try_num_bool:
        try:
            result = lib.maybe_convert_numeric(values, na_values, False)
            na_count = isna(result).sum()
        except Exception:
            result = values
            if values.dtype == np.object_:
                na_count = parsers.sanitize_objects(result, na_values, False)
    else:
        result = values
        if values.dtype == np.object_:
            na_count = parsers.sanitize_objects(values, na_values, False)

    if result.dtype == np.object_ and try_num_bool:
        result = libops.maybe_convert_bool(np.asarray(values),
                                           true_values=self.true_values,
                                           false_values=self.false_values)

    return result, na_count
<SYSTEM_TASK:> Cast values to specified type <END_TASK> <USER_TASK:> Description: def _cast_types(self, values, cast_type, column): """ Cast values to specified type Parameters ---------- values : ndarray cast_type : string or np.dtype dtype to cast values to column : string column name - used only for error reporting Returns ------- converted : ndarray """
    if is_categorical_dtype(cast_type):
        known_cats = (isinstance(cast_type, CategoricalDtype) and
                      cast_type.categories is not None)

        if not is_object_dtype(values) and not known_cats:
            # XXX this is for consistency with
            # c-parser which parses all categories
            # as strings
            values = astype_nansafe(values, str)

        cats = Index(values).unique().dropna()
        values = Categorical._from_inferred_categories(
            cats, cats.get_indexer(values), cast_type,
            true_values=self.true_values)

    # use the EA's implementation of casting
    elif is_extension_array_dtype(cast_type):
        # ensure cast_type is an actual dtype and not a string
        cast_type = pandas_dtype(cast_type)
        array_type = cast_type.construct_array_type()
        try:
            return array_type._from_sequence_of_strings(values,
                                                        dtype=cast_type)
        except NotImplementedError:
            raise NotImplementedError(
                "Extension Array: {ea} must implement "
                "_from_sequence_of_strings in order "
                "to be used in parser methods".format(ea=array_type))

    else:
        try:
            values = astype_nansafe(values, cast_type,
                                    copy=True, skipna=True)
        except ValueError:
            raise ValueError(
                "Unable to convert column {column} to type "
                "{cast_type}".format(
                    column=column, cast_type=cast_type))
    return values
<SYSTEM_TASK:> Set the columns that should not undergo dtype conversions. <END_TASK> <USER_TASK:> Description: def _set_noconvert_columns(self): """ Set the columns that should not undergo dtype conversions. Currently, any column that is involved with date parsing will not undergo such conversions. """
    names = self.orig_names
    if self.usecols_dtype == 'integer':
        # A set of integers will be converted to a list in
        # the correct order every single time.
        usecols = list(self.usecols)
        usecols.sort()
    elif (callable(self.usecols) or
            self.usecols_dtype not in ('empty', None)):
        # The names attribute should have the correct columns
        # in the proper order for indexing with parse_dates.
        usecols = self.names[:]
    else:
        # Usecols is empty.
        usecols = None

    def _set(x):
        if usecols is not None and is_integer(x):
            x = usecols[x]

        if not is_integer(x):
            x = names.index(x)

        self._reader.set_noconvert(x)

    if isinstance(self.parse_dates, list):
        for val in self.parse_dates:
            if isinstance(val, list):
                for k in val:
                    _set(k)
            else:
                _set(val)

    elif isinstance(self.parse_dates, dict):
        for val in self.parse_dates.values():
            if isinstance(val, list):
                for k in val:
                    _set(k)
            else:
                _set(val)

    elif self.parse_dates:
        if isinstance(self.index_col, list):
            for k in self.index_col:
                _set(k)
        elif self.index_col is not None:
            _set(self.index_col)
<SYSTEM_TASK:> Sets self._col_indices <END_TASK> <USER_TASK:> Description: def _handle_usecols(self, columns, usecols_key): """ Sets self._col_indices usecols_key is used if there are string usecols. """
    if self.usecols is not None:
        if callable(self.usecols):
            col_indices = _evaluate_usecols(self.usecols, usecols_key)
        elif any(isinstance(u, str) for u in self.usecols):
            if len(columns) > 1:
                raise ValueError("If using multiple headers, usecols must "
                                 "be integers.")
            col_indices = []

            for col in self.usecols:
                if isinstance(col, str):
                    try:
                        col_indices.append(usecols_key.index(col))
                    except ValueError:
                        _validate_usecols_names(self.usecols, usecols_key)
                else:
                    col_indices.append(col)
        else:
            col_indices = self.usecols

        columns = [[n for i, n in enumerate(column)
                    if i in col_indices]
                   for column in columns]
        self._col_indices = col_indices
    return columns
<SYSTEM_TASK:> Checks whether the file begins with the BOM character. <END_TASK> <USER_TASK:> Description: def _check_for_bom(self, first_row): """ Checks whether the file begins with the BOM character. If it does, remove it. In addition, if there is quoting in the field subsequent to the BOM, remove it as well because it technically takes place at the beginning of the name, not the middle of it. """
    # first_row will be a list, so we need to check
    # that that list is not empty before proceeding.
    if not first_row:
        return first_row

    # The first element of this row is the one that could have the
    # BOM that we want to remove. Check that the first element is a
    # string before proceeding.
    if not isinstance(first_row[0], str):
        return first_row

    # Check that the string is not empty, as that would
    # obviously not have a BOM at the start of it.
    if not first_row[0]:
        return first_row

    # Since the string is non-empty, check that it does
    # in fact begin with a BOM.
    first_elt = first_row[0][0]
    if first_elt != _BOM:
        return first_row

    first_row = first_row[0]

    if len(first_row) > 1 and first_row[1] == self.quotechar:
        start = 2
        quote = first_row[1]
        end = first_row[2:].index(quote) + 2

        # Extract the data between the quotation marks
        new_row = first_row[start:end]

        # Extract any remaining data after the second
        # quotation mark.
        if len(first_row) > end + 1:
            new_row += first_row[end + 1:]
        return [new_row]
    elif len(first_row) > 1:
        return [first_row[1:]]
    else:
        # First row is just the BOM, so we
        # return an empty string.
        return [""]
<SYSTEM_TASK:> Alert a user about a malformed row. <END_TASK> <USER_TASK:> Description: def _alert_malformed(self, msg, row_num): """ Alert a user about a malformed row. If `self.error_bad_lines` is True, the alert will be `ParserError`. If `self.warn_bad_lines` is True, the alert will be printed out. Parameters ---------- msg : The error message to display. row_num : The row number where the parsing error occurred. Because this row number is displayed, we 1-index, even though we 0-index internally. """
    if self.error_bad_lines:
        raise ParserError(msg)
    elif self.warn_bad_lines:
        base = 'Skipping line {row_num}: '.format(row_num=row_num)
        sys.stderr.write(base + msg + '\n')
<SYSTEM_TASK:> Iterate through the lines and remove any that are <END_TASK> <USER_TASK:> Description: def _remove_empty_lines(self, lines): """ Iterate through the lines and remove any that are either empty or contain only one whitespace value Parameters ---------- lines : array-like The array of lines that we are to filter. Returns ------- filtered_lines : array-like The same array of lines with the "empty" ones removed. """
    ret = []
    for l in lines:
        # Remove empty lines and lines with only one whitespace value
        if (len(l) > 1 or len(l) == 1 and
                (not isinstance(l[0], str) or l[0].strip())):
            ret.append(l)
    return ret
<SYSTEM_TASK:> Read rows from self.f, skipping as specified. <END_TASK> <USER_TASK:> Description: def get_rows(self, infer_nrows, skiprows=None): """ Read rows from self.f, skipping as specified. We distinguish buffer_rows (the first <= infer_nrows lines) from the rows returned to detect_colspecs because it's simpler to leave the other locations with skiprows logic alone than to modify them to deal with the fact we skipped some rows here as well. Parameters ---------- infer_nrows : int Number of rows to read from self.f, not counting rows that are skipped. skiprows: set, optional Indices of rows to skip. Returns ------- detect_rows : list of str A list containing the rows to read. """
    if skiprows is None:
        skiprows = set()
    buffer_rows = []
    detect_rows = []
    for i, row in enumerate(self.f):
        if i not in skiprows:
            detect_rows.append(row)
        buffer_rows.append(row)
        if len(detect_rows) >= infer_nrows:
            break
    self.buffer = iter(buffer_rows)
    return detect_rows
<SYSTEM_TASK:> Pack object `o` and write it to `stream` <END_TASK> <USER_TASK:> Description: def pack(o, stream, **kwargs): """ Pack object `o` and write it to `stream` See :class:`Packer` for options. """
    packer = Packer(**kwargs)
    stream.write(packer.pack(o))
<SYSTEM_TASK:> Construct concatenation plan for given block manager and indexers. <END_TASK> <USER_TASK:> Description: def get_mgr_concatenation_plan(mgr, indexers): """ Construct concatenation plan for given block manager and indexers. Parameters ---------- mgr : BlockManager indexers : dict of {axis: indexer} Returns ------- plan : list of (BlockPlacement, JoinUnit) tuples """
    # Calculate post-reindex shape , save for item axis which will be separate
    # for each block anyway.
    mgr_shape = list(mgr.shape)
    for ax, indexer in indexers.items():
        mgr_shape[ax] = len(indexer)
    mgr_shape = tuple(mgr_shape)

    if 0 in indexers:
        ax0_indexer = indexers.pop(0)
        blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
        blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
    else:

        if mgr._is_single_block:
            blk = mgr.blocks[0]
            return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]

        ax0_indexer = None
        blknos = mgr._blknos
        blklocs = mgr._blklocs

    plan = []
    for blkno, placements in libinternals.get_blkno_placements(blknos,
                                                               mgr.nblocks,
                                                               group=False):

        assert placements.is_slice_like

        join_unit_indexers = indexers.copy()

        shape = list(mgr_shape)
        shape[0] = len(placements)
        shape = tuple(shape)

        if blkno == -1:
            unit = JoinUnit(None, shape)
        else:
            blk = mgr.blocks[blkno]
            ax0_blk_indexer = blklocs[placements.indexer]

            unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and
                                      # Fastpath detection of join unit not
                                      # needing to reindex its block: no ax0
                                      # reindexing took place and block
                                      # placement was sequential before.
                                      ((ax0_indexer is None and
                                        blk.mgr_locs.is_slice_like and
                                        blk.mgr_locs.as_slice.step == 1) or
                                       # Slow-ish detection: all indexer locs
                                       # are sequential (and length match is
                                       # checked above).
                                       (np.diff(ax0_blk_indexer) == 1).all()))

            # Omit indexer if no item reindexing is required.
            if unit_no_ax0_reindexing:
                join_unit_indexers.pop(0, None)
            else:
                join_unit_indexers[0] = ax0_blk_indexer

            unit = JoinUnit(blk, shape, join_unit_indexers)

        plan.append((placements, unit))

    return plan
<SYSTEM_TASK:> Concatenate values from several join units along selected axis. <END_TASK> <USER_TASK:> Description: def concatenate_join_units(join_units, concat_axis, copy): """ Concatenate values from several join units along selected axis. """
    if concat_axis == 0 and len(join_units) > 1:
        # Concatenating join units along ax0 is handled in _merge_blocks.
        raise AssertionError("Concatenating join units along axis0")

    empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)

    to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
                                         upcasted_na=upcasted_na)
                 for ju in join_units]

    if len(to_concat) == 1:
        # Only one block, nothing to concatenate.
        concat_values = to_concat[0]
        if copy:
            if isinstance(concat_values, np.ndarray):
                # non-reindexed (=not yet copied) arrays are made into a view
                # in JoinUnit.get_reindexed_values
                if concat_values.base is not None:
                    concat_values = concat_values.copy()
            else:
                concat_values = concat_values.copy()
    else:
        concat_values = _concat._concat_compat(to_concat, axis=concat_axis)

    return concat_values
<SYSTEM_TASK:> Reduce join_unit's shape along item axis to length. <END_TASK> <USER_TASK:> Description: def trim_join_unit(join_unit, length): """ Reduce join_unit's shape along item axis to length. Extra items that didn't fit are returned as a separate block. """
    if 0 not in join_unit.indexers:
        extra_indexers = join_unit.indexers

        if join_unit.block is None:
            extra_block = None
        else:
            extra_block = join_unit.block.getitem_block(slice(length, None))
            join_unit.block = join_unit.block.getitem_block(slice(length))
    else:
        extra_block = join_unit.block

        extra_indexers = copy.copy(join_unit.indexers)
        extra_indexers[0] = extra_indexers[0][length:]
        join_unit.indexers[0] = join_unit.indexers[0][:length]

    extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
    join_unit.shape = (length,) + join_unit.shape[1:]

    return JoinUnit(block=extra_block, indexers=extra_indexers,
                    shape=extra_shape)
<SYSTEM_TASK:> Combine multiple concatenation plans into one. <END_TASK> <USER_TASK:> Description: def combine_concat_plans(plans, concat_axis): """ Combine multiple concatenation plans into one. existing_plan is updated in-place. """
    if len(plans) == 1:
        for p in plans[0]:
            yield p[0], [p[1]]

    elif concat_axis == 0:
        offset = 0
        for plan in plans:
            last_plc = None

            for plc, unit in plan:
                yield plc.add(offset), [unit]
                last_plc = plc

            if last_plc is not None:
                offset += last_plc.as_slice.stop

    else:
        num_ended = [0]

        def _next_or_none(seq):
            retval = next(seq, None)
            if retval is None:
                num_ended[0] += 1
            return retval

        plans = list(map(iter, plans))
        next_items = list(map(_next_or_none, plans))

        while num_ended[0] != len(next_items):
            if num_ended[0] > 0:
                raise ValueError("Plan shapes are not aligned")

            placements, units = zip(*next_items)

            lengths = list(map(len, placements))
            min_len, max_len = min(lengths), max(lengths)

            if min_len == max_len:
                yield placements[0], units
                next_items[:] = map(_next_or_none, plans)
            else:
                yielded_placement = None
                yielded_units = [None] * len(next_items)
                for i, (plc, unit) in enumerate(next_items):
                    yielded_units[i] = unit
                    if len(plc) > min_len:
                        # trim_join_unit updates unit in place, so only
                        # placement needs to be sliced to skip min_len.
                        next_items[i] = (plc[min_len:],
                                         trim_join_unit(unit, min_len))
                    else:
                        yielded_placement = plc
                        next_items[i] = _next_or_none(plans[i])

                yield yielded_placement, yielded_units
<SYSTEM_TASK:> Temporarily set a parameter value using the with statement. <END_TASK> <USER_TASK:> Description: def use(self, key, value): """ Temporarily set a parameter value using the with statement. Aliasing allowed. """
    old_value = self[key]
    try:
        self[key] = value
        yield self
    finally:
        self[key] = old_value
<SYSTEM_TASK:> Convert dtype types to stata types. Returns the byte of the given ordinal. <END_TASK> <USER_TASK:> Description: def _dtype_to_stata_type(dtype, column): """ Convert dtype types to stata types. Returns the byte of the given ordinal. See TYPE_MAP and comments for an explanation. This is also explained in the dta spec. 1 - 244 are strings of this length Pandas Stata 251 - for int8 byte 252 - for int16 int 253 - for int32 long 254 - for float32 float 255 - for double double If there are dates to convert, then dtype will already have the correct type inserted. """
    # TODO: expand to handle datetime to integer conversion
    if dtype.type == np.object_:  # try to coerce it to the biggest string
        # not memory efficient, what else could we
        # do?
        itemsize = max_len_string_array(ensure_object(column.values))
        return max(itemsize, 1)
    elif dtype == np.float64:
        return 255
    elif dtype == np.float32:
        return 254
    elif dtype == np.int32:
        return 253
    elif dtype == np.int16:
        return 252
    elif dtype == np.int8:
        return 251
    else:  # pragma : no cover
        raise NotImplementedError(
            "Data type {dtype} not supported.".format(dtype=dtype))
<SYSTEM_TASK:> Map numpy dtype to stata's default format for this type. Not terribly <END_TASK> <USER_TASK:> Description: def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, force_strl=False): """ Map numpy dtype to stata's default format for this type. Not terribly important since users can change this in Stata. Semantics are object -> "%DDs" where DD is the length of the string. If not a string, raise ValueError float64 -> "%10.0g" float32 -> "%9.0g" int64 -> "%9.0g" int32 -> "%12.0g" int16 -> "%8.0g" int8 -> "%8.0g" strl -> "%9s" """
    # TODO: Refactor to combine type with format
    # TODO: expand this to handle a default datetime format?
    if dta_version < 117:
        max_str_len = 244
    else:
        max_str_len = 2045
        if force_strl:
            return '%9s'
    if dtype.type == np.object_:
        inferred_dtype = infer_dtype(column, skipna=True)
        if not (inferred_dtype in ('string', 'unicode') or
                len(column) == 0):
            raise ValueError('Column `{col}` cannot be exported.\n\nOnly '
                             'string-like object arrays containing all '
                             'strings or a mix of strings and None can be '
                             'exported. Object arrays containing only null '
                             'values are prohibited. Other object types'
                             'cannot be exported and must first be converted '
                             'to one of the supported '
                             'types.'.format(col=column.name))
        itemsize = max_len_string_array(ensure_object(column.values))
        if itemsize > max_str_len:
            if dta_version >= 117:
                return '%9s'
            else:
                raise ValueError(excessive_string_length_error % column.name)
        return "%" + str(max(itemsize, 1)) + "s"
    elif dtype == np.float64:
        return "%10.0g"
    elif dtype == np.float32:
        return "%9.0g"
    elif dtype == np.int32:
        return "%12.0g"
    elif dtype == np.int8 or dtype == np.int16:
        return "%8.0g"
    else:  # pragma : no cover
        raise NotImplementedError(
            "Data type {dtype} not supported.".format(dtype=dtype))
<SYSTEM_TASK:> Takes a bytes instance and pads it with null bytes until it's length chars. <END_TASK> <USER_TASK:> Description: def _pad_bytes_new(name, length): """ Takes a bytes instance and pads it with null bytes until it's length chars. """
    if isinstance(name, str):
        name = bytes(name, 'utf-8')
    return name + b'\x00' * (length - len(name))
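Assuming the helper above is in scope, a name is encoded and null-padded to the fixed width the dta format expects:

padded = _pad_bytes_new('var1', 8)
assert padded == b'var1\x00\x00\x00\x00'
assert len(padded) == 8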
<SYSTEM_TASK:> Map between numpy and state dtypes <END_TASK> <USER_TASK:> Description: def _setup_dtype(self): """Map between numpy and state dtypes"""
    if self._dtype is not None:
        return self._dtype

    dtype = []  # Convert struct data types to numpy data type
    for i, typ in enumerate(self.typlist):
        if typ in self.NUMPY_TYPE_MAP:
            dtype.append(('s' + str(i),
                          self.byteorder + self.NUMPY_TYPE_MAP[typ]))
        else:
            dtype.append(('s' + str(i), 'S' + str(typ)))
    dtype = np.dtype(dtype)
    self._dtype = dtype

    return self._dtype
<SYSTEM_TASK:> Helper to call encode before writing to file for Python 3 compat. <END_TASK> <USER_TASK:> Description: def _write(self, to_write): """ Helper to call encode before writing to file for Python 3 compat. """
    self._file.write(to_write.encode(self._encoding or
                                     self._default_encoding))
<SYSTEM_TASK:> Check for categorical columns, retain categorical information for <END_TASK> <USER_TASK:> Description: def _prepare_categoricals(self, data): """Check for categorical columns, retain categorical information for Stata file and convert categorical data to int"""
    is_cat = [is_categorical_dtype(data[col]) for col in data]
    self._is_col_cat = is_cat
    self._value_labels = []
    if not any(is_cat):
        return data

    get_base_missing_value = StataMissingValue.get_base_missing_value
    data_formatted = []
    for col, col_is_cat in zip(data, is_cat):
        if col_is_cat:
            self._value_labels.append(StataValueLabel(data[col]))
            dtype = data[col].cat.codes.dtype
            if dtype == np.int64:
                raise ValueError('It is not possible to export '
                                 'int64-based categorical data to Stata.')
            values = data[col].cat.codes.values.copy()

            # Upcast if needed so that correct missing values can be set
            if values.max() >= get_base_missing_value(dtype):
                if dtype == np.int8:
                    dtype = np.int16
                elif dtype == np.int16:
                    dtype = np.int32
                else:
                    dtype = np.float64
                values = np.array(values, dtype=dtype)

            # Replace missing values with Stata missing value for type
            values[values == -1] = get_base_missing_value(dtype)
            data_formatted.append((col, values))
        else:
            data_formatted.append((col, data[col]))
    return DataFrame.from_dict(OrderedDict(data_formatted))
<SYSTEM_TASK:> Close the file if it was created by the writer. <END_TASK> <USER_TASK:> Description: def _close(self): """ Close the file if it was created by the writer. If a buffer or file-like object was passed in, for example a GzipFile, then leave this file open for the caller to close. In either case, attempt to flush the file contents to ensure they are written to disk (if supported) """
    # Some file-like objects might not support flush
    try:
        self._file.flush()
    except AttributeError:
        pass
    if self._own_file:
        self._file.close()
<SYSTEM_TASK:> Generates the GSO lookup table for the DataFRame <END_TASK> <USER_TASK:> Description: def generate_table(self): """ Generates the GSO lookup table for the DataFRame Returns ------- gso_table : OrderedDict Ordered dictionary using the string found as keys and their lookup position (v,o) as values gso_df : DataFrame DataFrame where strl columns have been converted to (v,o) values Notes ----- Modifies the DataFrame in-place. The DataFrame returned encodes the (v,o) values as uint64s. The encoding depends on teh dta version, and can be expressed as enc = v + o * 2 ** (o_size * 8) so that v is stored in the lower bits and o is in the upper bits. o_size is * 117: 4 * 118: 6 * 119: 5 """
    gso_table = self._gso_table
    gso_df = self.df
    columns = list(gso_df.columns)
    selected = gso_df[self.columns]
    col_index = [(col, columns.index(col)) for col in self.columns]
    keys = np.empty(selected.shape, dtype=np.uint64)
    for o, (idx, row) in enumerate(selected.iterrows()):
        for j, (col, v) in enumerate(col_index):
            val = row[col]
            # Allow columns with mixed str and None (GH 23633)
            val = '' if val is None else val
            key = gso_table.get(val, None)
            if key is None:
                # Stata prefers human numbers
                key = (v + 1, o + 1)
                gso_table[val] = key
            keys[o, j] = self._convert_key(key)
    for i, col in enumerate(self.columns):
        gso_df[col] = keys[:, i]

    return gso_table, gso_df
<SYSTEM_TASK:> Generates the binary blob of GSOs that is written to the dta file. <END_TASK> <USER_TASK:> Description: def generate_blob(self, gso_table): """ Generates the binary blob of GSOs that is written to the dta file. Parameters ---------- gso_table : OrderedDict Ordered dictionary (str, vo) Returns ------- gso : bytes Binary content of dta file to be placed between strl tags Notes ----- Output format depends on dta version. 117 uses two uint32s to express v and o while 118+ uses a uint32 for v and a uint64 for o. """
    # Format information
    # Length includes null term
    # 117
    # GSOvvvvooootllllxxxxxxxxxxxxxxx...x
    # 3 u4 u4 u1 u4 string + null term
    #
    # 118, 119
    # GSOvvvvooooooootllllxxxxxxxxxxxxxxx...x
    # 3 u4 u8 u1 u4 string + null term

    bio = BytesIO()
    gso = bytes('GSO', 'ascii')
    gso_type = struct.pack(self._byteorder + 'B', 130)
    null = struct.pack(self._byteorder + 'B', 0)
    v_type = self._byteorder + self._gso_v_type
    o_type = self._byteorder + self._gso_o_type
    len_type = self._byteorder + 'I'
    for strl, vo in gso_table.items():
        if vo == (0, 0):
            continue
        v, o = vo

        # GSO
        bio.write(gso)

        # vvvv
        bio.write(struct.pack(v_type, v))

        # oooo / oooooooo
        bio.write(struct.pack(o_type, o))

        # t
        bio.write(gso_type)

        # llll
        utf8_string = bytes(strl, 'utf-8')
        bio.write(struct.pack(len_type, len(utf8_string) + 1))

        # xxx...xxx
        bio.write(utf8_string)
        bio.write(null)

    bio.seek(0)
    return bio.read()
<SYSTEM_TASK:> Called twice during file write. The first populates the values in <END_TASK> <USER_TASK:> Description: def _write_map(self): """Called twice during file write. The first populates the values in the map with 0s. The second call writes the final map locations when all blocks have been written."""
    if self._map is None:
        self._map = OrderedDict((('stata_data', 0),
                                 ('map', self._file.tell()),
                                 ('variable_types', 0),
                                 ('varnames', 0),
                                 ('sortlist', 0),
                                 ('formats', 0),
                                 ('value_label_names', 0),
                                 ('variable_labels', 0),
                                 ('characteristics', 0),
                                 ('data', 0),
                                 ('strls', 0),
                                 ('value_labels', 0),
                                 ('stata_data_close', 0),
                                 ('end-of-file', 0)))
    # Move to start of map
    self._file.seek(self._map['map'])
    bio = BytesIO()
    for val in self._map.values():
        bio.write(struct.pack(self._byteorder + 'Q', val))
    bio.seek(0)
    self._file.write(self._tag(bio.read(), 'map'))
<SYSTEM_TASK:> Update column names for conversion to strl if they might have been <END_TASK> <USER_TASK:> Description: def _update_strl_names(self): """Update column names for conversion to strl if they might have been changed to comply with Stata naming rules"""
    # Update convert_strl if names changed
    for orig, new in self._converted_names.items():
        if orig in self._convert_strl:
            idx = self._convert_strl.index(orig)
            self._convert_strl[idx] = new
<SYSTEM_TASK:> Convert columns to StrLs if either very large or in the <END_TASK> <USER_TASK:> Description: def _convert_strls(self, data): """Convert columns to StrLs if either very large or in the convert_strl variable"""
    convert_cols = [
        col for i, col in enumerate(data)
        if self.typlist[i] == 32768 or col in self._convert_strl]

    if convert_cols:
        ssw = StataStrLWriter(data, convert_cols)
        tab, new_data = ssw.generate_table()
        data = new_data
        self._strl_blob = ssw.generate_blob(tab)
    return data
<SYSTEM_TASK:> Register Pandas Formatters and Converters with matplotlib <END_TASK> <USER_TASK:> Description: def register(explicit=True): """ Register Pandas Formatters and Converters with matplotlib This function modifies the global ``matplotlib.units.registry`` dictionary. Pandas adds custom converters for * pd.Timestamp * pd.Period * np.datetime64 * datetime.datetime * datetime.date * datetime.time See Also -------- deregister_matplotlib_converter """
    # Renamed in pandas.plotting.__init__
    global _WARN

    if explicit:
        _WARN = False

    pairs = get_pairs()
    for type_, cls in pairs:
        converter = cls()
        if type_ in units.registry:
            previous = units.registry[type_]
            _mpl_units[type_] = previous
        units.registry[type_] = converter
<SYSTEM_TASK:> Remove pandas' formatters and converters <END_TASK> <USER_TASK:> Description: def deregister(): """ Remove pandas' formatters and converters Removes the custom converters added by :func:`register`. This attempts to set the state of the registry back to the state before pandas registered its own units. Converters for pandas' own types like Timestamp and Period are removed completely. Converters for types pandas overwrites, like ``datetime.datetime``, are restored to their original value. See Also -------- deregister_matplotlib_converters """
    # Renamed in pandas.plotting.__init__
    for type_, cls in get_pairs():
        # We use type to catch our classes directly, no inheritance
        if type(units.registry.get(type_)) is cls:
            units.registry.pop(type_)

    # restore the old keys
    for unit, formatter in _mpl_units.items():
        if type(formatter) not in {DatetimeConverter, PeriodConverter,
                                   TimeConverter}:
            # make it idempotent by excluding ours.
            units.registry[unit] = formatter
<SYSTEM_TASK:> Returns a default spacing between consecutive ticks for annual data. <END_TASK> <USER_TASK:> Description: def _get_default_annual_spacing(nyears): """ Returns a default spacing between consecutive ticks for annual data. """
    if nyears < 11:
        (min_spacing, maj_spacing) = (1, 1)
    elif nyears < 20:
        (min_spacing, maj_spacing) = (1, 2)
    elif nyears < 50:
        (min_spacing, maj_spacing) = (1, 5)
    elif nyears < 100:
        (min_spacing, maj_spacing) = (5, 10)
    elif nyears < 200:
        (min_spacing, maj_spacing) = (5, 25)
    elif nyears < 600:
        (min_spacing, maj_spacing) = (10, 50)
    else:
        factor = nyears // 1000 + 1
        (min_spacing, maj_spacing) = (factor * 20, factor * 100)
    return (min_spacing, maj_spacing)
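Assuming the helper above is in scope, the spacing grows with the number of years being plotted:

assert _get_default_annual_spacing(5) == (1, 1)        # short span: a tick every year
assert _get_default_annual_spacing(150) == (5, 25)     # longer span: coarser ticks
assert _get_default_annual_spacing(2500) == (60, 300)  # factor = 2500 // 1000 + 1 = 3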
<SYSTEM_TASK:> Returns the indices where the given period changes. <END_TASK> <USER_TASK:> Description: def period_break(dates, period): """ Returns the indices where the given period changes. Parameters ---------- dates : PeriodIndex Array of intervals to monitor. period : string Name of the period to monitor. """
    current = getattr(dates, period)
    previous = getattr(dates - 1 * dates.freq, period)
    return np.nonzero(current - previous)[0]
<SYSTEM_TASK:> Returns true if the ``label_flags`` indicate there is at least one label <END_TASK> <USER_TASK:> Description: def has_level_label(label_flags, vmin): """ Returns true if the ``label_flags`` indicate there is at least one label for this level. if the minimum view limit is not an exact integer, then the first tick label won't be shown, so we must adjust for that. """
    if label_flags.size == 0 or (label_flags.size == 1 and
                                 label_flags[0] == 0 and
                                 vmin % 1 > 0.0):
        return False
    else:
        return True
<SYSTEM_TASK:> Set the view limits to include the data range. <END_TASK> <USER_TASK:> Description: def autoscale(self): """ Set the view limits to include the data range. """
    dmin, dmax = self.datalim_to_dt()
    if dmin > dmax:
        dmax, dmin = dmin, dmax

    # We need to cap at the endpoints of valid datetime

    # TODO(wesm): unused?
    # delta = relativedelta(dmax, dmin)
    # try:
    #     start = dmin - delta
    # except ValueError:
    #     start = _from_ordinal(1.0)
    # try:
    #     stop = dmax + delta
    # except ValueError:
    #     # The magic number!
    #     stop = _from_ordinal(3652059.9999999)

    dmin, dmax = self.datalim_to_dt()

    vmin = dates.date2num(dmin)
    vmax = dates.date2num(dmax)

    return self.nonsingular(vmin, vmax)
<SYSTEM_TASK:> Sets the view limits to the nearest multiples of base that contain the <END_TASK> <USER_TASK:> Description: def autoscale(self): """ Sets the view limits to the nearest multiples of base that contain the data. """
    # requires matplotlib >= 0.98.0
    (vmin, vmax) = self.axis.get_data_interval()

    locs = self._get_default_locs(vmin, vmax)
    (vmin, vmax) = locs[[0, -1]]
    if vmin == vmax:
        vmin -= 1
        vmax += 1
    return nonsingular(vmin, vmax)
<SYSTEM_TASK:> Create a Table schema from ``data``. <END_TASK> <USER_TASK:> Description: def build_table_schema(data, index=True, primary_key=None, version=True): """ Create a Table schema from ``data``. Parameters ---------- data : Series, DataFrame index : bool, default True Whether to include ``data.index`` in the schema. primary_key : bool or None, default True column names to designate as the primary key. The default `None` will set `'primaryKey'` to the index level or levels if the index is unique. version : bool, default True Whether to include a field `pandas_version` with the version of pandas that generated the schema. Returns ------- schema : dict Notes ----- See `_as_json_table_type` for conversion types. Timedeltas as converted to ISO8601 duration format with 9 decimal places after the seconds field for nanosecond precision. Categoricals are converted to the `any` dtype, and use the `enum` field constraint to list the allowed values. The `ordered` attribute is included in an `ordered` field. Examples -------- >>> df = pd.DataFrame( ... {'A': [1, 2, 3], ... 'B': ['a', 'b', 'c'], ... 'C': pd.date_range('2016-01-01', freq='d', periods=3), ... }, index=pd.Index(range(3), name='idx')) >>> build_table_schema(df) {'fields': [{'name': 'idx', 'type': 'integer'}, {'name': 'A', 'type': 'integer'}, {'name': 'B', 'type': 'string'}, {'name': 'C', 'type': 'datetime'}], 'pandas_version': '0.20.0', 'primaryKey': ['idx']} """
    if index is True:
        data = set_default_names(data)

    schema = {}
    fields = []

    if index:
        if data.index.nlevels > 1:
            for level in data.index.levels:
                fields.append(convert_pandas_type_to_json_field(level))
        else:
            fields.append(convert_pandas_type_to_json_field(data.index))

    if data.ndim > 1:
        for column, s in data.iteritems():
            fields.append(convert_pandas_type_to_json_field(s))
    else:
        fields.append(convert_pandas_type_to_json_field(data))

    schema['fields'] = fields
    if index and data.index.is_unique and primary_key is None:
        if data.index.nlevels == 1:
            schema['primaryKey'] = [data.index.name]
        else:
            schema['primaryKey'] = data.index.names
    elif primary_key is not None:
        schema['primaryKey'] = primary_key

    if version:
        schema['pandas_version'] = '0.20.0'
    return schema
<SYSTEM_TASK:> Builds a DataFrame from a given schema <END_TASK> <USER_TASK:> Description: def parse_table_schema(json, precise_float): """ Builds a DataFrame from a given schema Parameters ---------- json : A JSON table schema precise_float : boolean Flag controlling precision when decoding string to double values, as dictated by ``read_json`` Returns ------- df : DataFrame Raises ------ NotImplementedError If the JSON table schema contains either timezone or timedelta data Notes ----- Because :func:`DataFrame.to_json` uses the string 'index' to denote a name-less :class:`Index`, this function sets the name of the returned :class:`DataFrame` to ``None`` when said string is encountered with a normal :class:`Index`. For a :class:`MultiIndex`, the same limitation applies to any strings beginning with 'level_'. Therefore, an :class:`Index` name of 'index' and :class:`MultiIndex` names starting with 'level_' are not supported. See Also -------- build_table_schema : Inverse function. pandas.read_json """
    table = loads(json, precise_float=precise_float)
    col_order = [field['name'] for field in table['schema']['fields']]
    df = DataFrame(table['data'], columns=col_order)[col_order]

    dtypes = {field['name']: convert_json_field_to_pandas_type(field)
              for field in table['schema']['fields']}

    # Cannot directly use as_type with timezone data on object; raise for now
    if any(str(x).startswith('datetime64[ns, ') for x in dtypes.values()):
        raise NotImplementedError('table="orient" can not yet read timezone '
                                  'data')

    # No ISO constructor for Timedelta as of yet, so need to raise
    if 'timedelta64' in dtypes.values():
        raise NotImplementedError('table="orient" can not yet read '
                                  'ISO-formatted Timedelta data')

    df = df.astype(dtypes)

    if 'primaryKey' in table['schema']:
        df = df.set_index(table['schema']['primaryKey'])
        if len(df.index.names) == 1:
            if df.index.name == 'index':
                df.index.name = None
        else:
            df.index.names = [None if x.startswith('level_') else x for x in
                              df.index.names]

    return df
<SYSTEM_TASK:> Find the appropriate name to pin to an operation result. This result <END_TASK> <USER_TASK:> Description: def get_op_result_name(left, right): """ Find the appropriate name to pin to an operation result. This result should always be either an Index or a Series. Parameters ---------- left : {Series, Index} right : object Returns ------- name : object Usually a string """
    # `left` is always a pd.Series when called from within ops
    if isinstance(right, (ABCSeries, pd.Index)):
        name = _maybe_match_name(left, right)
    else:
        name = left.name
    return name
<SYSTEM_TASK:> Try to find a name to attach to the result of an operation between <END_TASK> <USER_TASK:> Description: def _maybe_match_name(a, b): """ Try to find a name to attach to the result of an operation between a and b. If only one of these has a `name` attribute, return that name. Otherwise return a consensus name if they match of None if they have different names. Parameters ---------- a : object b : object Returns ------- name : str or None See Also -------- pandas.core.common.consensus_name_attr """
    a_has = hasattr(a, 'name')
    b_has = hasattr(b, 'name')
    if a_has and b_has:
        if a.name == b.name:
            return a.name
        else:
            # TODO: what if they both have np.nan for their names?
            return None
    elif a_has:
        return a.name
    elif b_has:
        return b.name
    return None
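Assuming the helper above is in scope (it is a private helper in pandas' ops module), name propagation works like this:

import pandas as pd

a = pd.Series([1, 2], name='x')
b = pd.Series([3, 4], name='x')
c = pd.Series([5, 6], name='y')
assert _maybe_match_name(a, b) == 'x'       # matching names are kept
assert _maybe_match_name(a, c) is None      # conflicting names drop to None
assert _maybe_match_name(a, [1, 2]) == 'x'  # only one operand has a name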
<SYSTEM_TASK:> Cast non-pandas objects to pandas types to unify behavior of arithmetic <END_TASK> <USER_TASK:> Description: def maybe_upcast_for_op(obj): """ Cast non-pandas objects to pandas types to unify behavior of arithmetic and comparison operations. Parameters ---------- obj: object Returns ------- out : object Notes ----- Be careful to call this *after* determining the `name` attribute to be attached to the result of the arithmetic operation. """
    if type(obj) is datetime.timedelta:
        # GH#22390  cast up to Timedelta to rely on Timedelta
        # implementation; otherwise operation against numeric-dtype
        # raises TypeError
        return pd.Timedelta(obj)
    elif isinstance(obj, np.timedelta64) and not isna(obj):
        # In particular non-nanosecond timedelta64 needs to be cast to
        # nanoseconds, or else we get undesired behavior like
        # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D')
        # The isna check is to avoid casting timedelta64("NaT"), which would
        # return NaT and incorrectly be treated as a datetime-NaT.
        return pd.Timedelta(obj)
    elif isinstance(obj, np.ndarray) and is_timedelta64_dtype(obj):
        # GH#22390 Unfortunately we need to special-case right-hand
        # timedelta64 dtypes because numpy casts integer dtypes to
        # timedelta64 when operating with timedelta64
        return pd.TimedeltaIndex(obj)
    return obj
<SYSTEM_TASK:> Return a binary method that always raises a TypeError. <END_TASK> <USER_TASK:> Description: def make_invalid_op(name): """ Return a binary method that always raises a TypeError. Parameters ---------- name : str Returns ------- invalid_op : function """
    def invalid_op(self, other=None):
        raise TypeError("cannot perform {name} with this index type: "
                        "{typ}".format(name=name, typ=type(self).__name__))

    invalid_op.__name__ = name
    return invalid_op
<SYSTEM_TASK:> Find the keyword arguments to pass to numexpr for the given operation. <END_TASK> <USER_TASK:> Description: def _gen_eval_kwargs(name): """ Find the keyword arguments to pass to numexpr for the given operation. Parameters ---------- name : str Returns ------- eval_kwargs : dict Examples -------- >>> _gen_eval_kwargs("__add__") {} >>> _gen_eval_kwargs("rtruediv") {'reversed': True, 'truediv': True} """
    kwargs = {}

    # Series and Panel appear to only pass __add__, __radd__, ...
    # but DataFrame gets both these dunder names _and_ non-dunder names
    # add, radd, ...
    name = name.replace('__', '')

    if name.startswith('r'):
        if name not in ['radd', 'rand', 'ror', 'rxor']:
            # Exclude commutative operations
            kwargs['reversed'] = True

    if name in ['truediv', 'rtruediv']:
        kwargs['truediv'] = True

    if name in ['ne']:
        kwargs['masker'] = True

    return kwargs
<SYSTEM_TASK:> Find the operation string, if any, to pass to numexpr for this <END_TASK> <USER_TASK:> Description: def _get_opstr(op, cls): """ Find the operation string, if any, to pass to numexpr for this operation. Parameters ---------- op : binary operator cls : class Returns ------- op_str : string or None """
    # numexpr is available for non-sparse classes
    subtyp = getattr(cls, '_subtyp', '')
    use_numexpr = 'sparse' not in subtyp

    if not use_numexpr:
        # if we're not using numexpr, then don't pass a str_rep
        return None

    return {operator.add: '+',
            radd: '+',
            operator.mul: '*',
            rmul: '*',
            operator.sub: '-',
            rsub: '-',
            operator.truediv: '/',
            rtruediv: '/',
            operator.floordiv: '//',
            rfloordiv: '//',
            operator.mod: None,  # TODO: Why None for mod but '%' for rmod?
            rmod: '%',
            operator.pow: '**',
            rpow: '**',
            operator.eq: '==',
            operator.ne: '!=',
            operator.le: '<=',
            operator.lt: '<',
            operator.ge: '>=',
            operator.gt: '>',
            operator.and_: '&',
            rand_: '&',
            operator.or_: '|',
            ror_: '|',
            operator.xor: '^',
            rxor: '^',
            divmod: None,
            rdivmod: None}[op]
<SYSTEM_TASK:> Find the name to attach to this method according to conventions <END_TASK> <USER_TASK:> Description: def _get_op_name(op, special): """ Find the name to attach to this method according to conventions for special and non-special methods. Parameters ---------- op : binary operator special : bool Returns ------- op_name : str """
opname = op.__name__.strip('_') if special: opname = '__{opname}__'.format(opname=opname) return opname
<SYSTEM_TASK:> Make the appropriate substitutions for the given operation and class-typ <END_TASK> <USER_TASK:> Description: def _make_flex_doc(op_name, typ): """ Make the appropriate substitutions for the given operation and class-typ into _flex_doc_SERIES, _flex_doc_FRAME, or _flex_doc_PANEL to return the docstring to attach to a generated method. Parameters ---------- op_name : str {'__add__', '__sub__', ... '__eq__', '__ne__', ...} typ : str {'series', 'dataframe', 'panel'} Returns ------- doc : str """
op_name = op_name.replace('__', '') op_desc = _op_descriptions[op_name] if op_desc['reversed']: equiv = 'other ' + op_desc['op'] + ' ' + typ else: equiv = typ + ' ' + op_desc['op'] + ' other' if typ == 'series': base_doc = _flex_doc_SERIES doc_no_examples = base_doc.format( desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse'] ) if op_desc['series_examples']: doc = doc_no_examples + op_desc['series_examples'] else: doc = doc_no_examples elif typ == 'dataframe': base_doc = _flex_doc_FRAME doc = base_doc.format( desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse'] ) elif typ == 'panel': base_doc = _flex_doc_PANEL doc = base_doc.format( desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse'] ) else: raise AssertionError('Invalid typ argument.') return doc
<SYSTEM_TASK:> Apply the function `op` to only non-null points in x and y. <END_TASK> <USER_TASK:> Description: def mask_cmp_op(x, y, op, allowed_types): """ Apply the function `op` to only non-null points in x and y. Parameters ---------- x : array-like y : array-like op : binary operation allowed_types : class or tuple of classes Returns ------- result : ndarray[bool] """
# TODO: Can we make the allowed_types arg unnecessary? xrav = x.ravel() result = np.empty(x.size, dtype=bool) if isinstance(y, allowed_types): yrav = y.ravel() mask = notna(xrav) & notna(yrav) result[mask] = op(np.array(list(xrav[mask])), np.array(list(yrav[mask]))) else: mask = notna(xrav) result[mask] = op(np.array(list(xrav[mask])), y) if op == operator.ne: # pragma: no cover np.putmask(result, ~mask, True) else: np.putmask(result, ~mask, False) result = result.reshape(x.shape) return result
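A minimal standalone sketch of the same mask-then-compare idea on 1-d object arrays (made-up data; uses pd.notna in place of the module-level notna):
import operator
import numpy as np
import pandas as pd

x = np.array([1.0, np.nan, 3.0], dtype=object)
y = np.array([1.0, 2.0, 2.0], dtype=object)

mask = pd.notna(x) & pd.notna(y)
result = np.zeros(x.size, dtype=bool)  # non-ne ops fill missing slots with False, as above
result[mask] = operator.gt(x[mask].astype(float), y[mask].astype(float))
print(result)  # [False False  True]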
<SYSTEM_TASK:> Identify cases where a DataFrame operation should dispatch to its <END_TASK> <USER_TASK:> Description: def should_series_dispatch(left, right, op): """ Identify cases where a DataFrame operation should dispatch to its Series counterpart. Parameters ---------- left : DataFrame right : DataFrame op : binary operator Returns ------- override : bool """
if left._is_mixed_type or right._is_mixed_type: return True if not len(left.columns) or not len(right.columns): # ensure obj.dtypes[0] exists for each obj return False ldtype = left.dtypes.iloc[0] rdtype = right.dtypes.iloc[0] if ((is_timedelta64_dtype(ldtype) and is_integer_dtype(rdtype)) or (is_timedelta64_dtype(rdtype) and is_integer_dtype(ldtype))): # numpy casts integer dtypes to timedelta64 in this scenario, so # dispatch to Series ops to get the correct behavior return True if is_datetime64_dtype(ldtype) and is_object_dtype(rdtype): # in the particular case where right is an array of DateOffsets return True return False
<SYSTEM_TASK:> Wrap Series left in the given index_class to delegate the operation op <END_TASK> <USER_TASK:> Description: def dispatch_to_index_op(op, left, right, index_class): """ Wrap Series left in the given index_class to delegate the operation op to the index implementation. DatetimeIndex and TimedeltaIndex perform type checking, timezone handling, overflow checks, etc. Parameters ---------- op : binary operator (operator.add, operator.sub, ...) left : Series right : object index_class : DatetimeIndex or TimedeltaIndex Returns ------- result : object, usually DatetimeIndex, TimedeltaIndex, or Series """
left_idx = index_class(left) # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, # left_idx may inherit a freq from a cached DatetimeIndex. # See discussion in GH#19147. if getattr(left_idx, 'freq', None) is not None: left_idx = left_idx._shallow_copy(freq=None) try: result = op(left_idx, right) except NullFrequencyError: # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError # on add/sub of integers (or int-like). We re-raise as a TypeError. raise TypeError('incompatible type for a datetime/timedelta ' 'operation [{name}]'.format(name=op.__name__)) return result
<SYSTEM_TASK:> Assume that left or right is a Series backed by an ExtensionArray, <END_TASK> <USER_TASK:> Description: def dispatch_to_extension_op(op, left, right): """ Assume that left or right is a Series backed by an ExtensionArray, apply the operator defined by op. """
# The op calls will raise TypeError if the op is not defined # on the ExtensionArray # unbox Series and Index to arrays if isinstance(left, (ABCSeries, ABCIndexClass)): new_left = left._values else: new_left = left if isinstance(right, (ABCSeries, ABCIndexClass)): new_right = right._values else: new_right = right res_values = op(new_left, new_right) res_name = get_op_result_name(left, right) if op.__name__ in ['divmod', 'rdivmod']: return _construct_divmod_result( left, res_values, left.index, res_name) return _construct_result(left, res_values, left.index, res_name)
<SYSTEM_TASK:> divmod returns a tuple of like indexed series instead of a single series. <END_TASK> <USER_TASK:> Description: def _construct_divmod_result(left, result, index, name, dtype=None): """divmod returns a tuple of like indexed series instead of a single series. """
return ( _construct_result(left, result[0], index=index, name=name, dtype=dtype), _construct_result(left, result[1], index=index, name=name, dtype=dtype), )
<SYSTEM_TASK:> Apply binary operator `func` to self, other using alignment and fill <END_TASK> <USER_TASK:> Description: def _combine_series_frame(self, other, func, fill_value=None, axis=None, level=None): """ Apply binary operator `func` to self, other using alignment and fill conventions determined by the fill_value, axis, and level kwargs. Parameters ---------- self : DataFrame other : Series func : binary operator fill_value : object, default None axis : {0, 1, 'columns', 'index', None}, default None level : int or None, default None Returns ------- result : DataFrame """
if fill_value is not None: raise NotImplementedError("fill_value {fill} not supported." .format(fill=fill_value)) if axis is not None: axis = self._get_axis_number(axis) if axis == 0: return self._combine_match_index(other, func, level=level) else: return self._combine_match_columns(other, func, level=level) else: if not len(other): return self * np.nan if not len(self): # Ambiguous case, use _series so works with DataFrame return self._constructor(data=self._series, index=self.index, columns=self.columns) # default axis is columns return self._combine_match_columns(other, func, level=level)
<SYSTEM_TASK:> convert rhs to meet lhs dims if input is list, tuple or np.ndarray <END_TASK> <USER_TASK:> Description: def _align_method_FRAME(left, right, axis): """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
def to_series(right): msg = ('Unable to coerce to Series, length must be {req_len}: ' 'given {given_len}') if axis is not None and left._get_axis_name(axis) == 'index': if len(left.index) != len(right): raise ValueError(msg.format(req_len=len(left.index), given_len=len(right))) right = left._constructor_sliced(right, index=left.index) else: if len(left.columns) != len(right): raise ValueError(msg.format(req_len=len(left.columns), given_len=len(right))) right = left._constructor_sliced(right, index=left.columns) return right if isinstance(right, np.ndarray): if right.ndim == 1: right = to_series(right) elif right.ndim == 2: if right.shape == left.shape: right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[0] == left.shape[0] and right.shape[1] == 1: # Broadcast across columns right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns) elif right.shape[1] == left.shape[1] and right.shape[0] == 1: # Broadcast along rows right = to_series(right[0, :]) else: raise ValueError("Unable to coerce to DataFrame, shape " "must be {req_shape}: given {given_shape}" .format(req_shape=left.shape, given_shape=right.shape)) elif right.ndim > 2: raise ValueError('Unable to coerce to Series/DataFrame, dim ' 'must be <= 2: {dim}'.format(dim=right.shape)) elif (is_list_like(right) and not isinstance(right, (ABCSeries, ABCDataFrame))): # GH17901 right = to_series(right) return right
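From the public API, the effect of this coercion shows up when a DataFrame is combined with a plain list or 1-d ndarray (a sketch with made-up data):
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [10, 20]})

# Length matches the columns, so the array is coerced to a Series aligned
# on the columns and broadcast down the rows.
print(df + np.array([100, 1000]))

# A length mismatch surfaces the ValueError raised in to_series above.
try:
    df + [1, 2, 3]
except ValueError as err:
    print(err)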
<SYSTEM_TASK:> For SparseSeries operation, coerce to float64 if the result is expected <END_TASK> <USER_TASK:> Description: def _cast_sparse_series_op(left, right, opname): """ For SparseSeries operation, coerce to float64 if the result is expected to have NaN or inf values Parameters ---------- left : SparseArray right : SparseArray opname : str Returns ------- left : SparseArray right : SparseArray """
from pandas.core.sparse.api import SparseDtype opname = opname.strip('_') # TODO: This should be moved to the array? if is_integer_dtype(left) and is_integer_dtype(right): # series coerces to float64 if result should have NaN/inf if opname in ('floordiv', 'mod') and (right.values == 0).any(): left = left.astype(SparseDtype(np.float64, left.fill_value)) right = right.astype(SparseDtype(np.float64, right.fill_value)) elif opname in ('rfloordiv', 'rmod') and (left.values == 0).any(): left = left.astype(SparseDtype(np.float64, left.fill_value)) right = right.astype(SparseDtype(np.float64, right.fill_value)) return left, right
<SYSTEM_TASK:> If the user passes a freq and another freq is inferred from passed data, <END_TASK> <USER_TASK:> Description: def validate_inferred_freq(freq, inferred_freq, freq_infer): """ If the user passes a freq and another freq is inferred from passed data, require that they match. Parameters ---------- freq : DateOffset or None inferred_freq : DateOffset or None freq_infer : bool Returns ------- freq : DateOffset or None freq_infer : bool Notes ----- We assume at this point that `maybe_infer_freq` has been called, so `freq` is either a DateOffset object or None. """
if inferred_freq is not None: if freq is not None and freq != inferred_freq: raise ValueError('Inferred frequency {inferred} from passed ' 'values does not conform to passed frequency ' '{passed}' .format(inferred=inferred_freq, passed=freq.freqstr)) elif freq is None: freq = inferred_freq freq_infer = False return freq, freq_infer
<SYSTEM_TASK:> Comparing a DateOffset to the string "infer" raises, so we need to <END_TASK> <USER_TASK:> Description: def maybe_infer_freq(freq): """ Comparing a DateOffset to the string "infer" raises, so we need to be careful about comparisons. Make a dummy variable `freq_infer` to signify the case where the given freq is "infer" and set freq to None to avoid comparison trouble later on. Parameters ---------- freq : {DateOffset, None, str} Returns ------- freq : {DateOffset, None} freq_infer : bool """
freq_infer = False if not isinstance(freq, DateOffset): # if a passed freq is None, don't infer automatically if freq != 'infer': freq = frequencies.to_offset(freq) else: freq_infer = True freq = None return freq, freq_infer
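How this surfaces through a public constructor (a sketch with made-up data; behavior follows the validation helpers above):
import pandas as pd

values = ['2020-01-01', '2020-01-02', '2020-01-03']

# 'infer' is turned into freq=None plus freq_infer=True internally.
idx = pd.DatetimeIndex(values, freq='infer')
print(idx.freq)  # <Day>

# An explicit freq that conflicts with the inferred one is rejected
# by validate_inferred_freq.
try:
    pd.DatetimeIndex(values, freq='2D')
except ValueError as err:
    print(err)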
<SYSTEM_TASK:> Helper for coercing an input scalar or array to i8. <END_TASK> <USER_TASK:> Description: def _ensure_datetimelike_to_i8(other, to_utc=False): """ Helper for coercing an input scalar or array to i8. Parameters ---------- other : 1d array to_utc : bool, default False If True, convert the values to UTC before extracting the i8 values If False, extract the i8 values directly. Returns ------- i8 1d array """
from pandas import Index from pandas.core.arrays import PeriodArray if lib.is_scalar(other) and isna(other): return iNaT elif isinstance(other, (PeriodArray, ABCIndexClass, DatetimeLikeArrayMixin)): # convert tz if needed if getattr(other, 'tz', None) is not None: if to_utc: other = other.tz_convert('UTC') else: other = other.tz_localize(None) else: try: return np.array(other, copy=False).view('i8') except TypeError: # period array cannot be coerced to int other = Index(other) return other.asi8
<SYSTEM_TASK:> Construct a scalar type from a string. <END_TASK> <USER_TASK:> Description: def _scalar_from_string( self, value: str, ) -> Union[Period, Timestamp, Timedelta, NaTType]: """ Construct a scalar type from a string. Parameters ---------- value : str Returns ------- Period, Timestamp, or Timedelta, or NaT Whatever the type of ``self._scalar_type`` is. Notes ----- This should call ``self._check_compatible_with`` before unboxing the result. """
raise AbstractMethodError(self)
<SYSTEM_TASK:> Verify that `self` and `other` are compatible. <END_TASK> <USER_TASK:> Description: def _check_compatible_with( self, other: Union[Period, Timestamp, Timedelta, NaTType], ) -> None: """ Verify that `self` and `other` are compatible. * DatetimeArray verifies that the timezones (if any) match * PeriodArray verifies that the freq matches * Timedelta has no verification In each case, NaT is considered compatible. Parameters ---------- other Raises ------ Exception """
raise AbstractMethodError(self)
<SYSTEM_TASK:> Convert to Index using specified date_format. <END_TASK> <USER_TASK:> Description: def strftime(self, date_format): """ Convert to Index using specified date_format. Return an Index of formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in `python string format doc <%(URL)s>`__. Parameters ---------- date_format : str Date format string (e.g. "%%Y-%%m-%%d"). Returns ------- Index Index of formatted strings. See Also -------- to_datetime : Convert the given argument to datetime. DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. DatetimeIndex.round : Round the DatetimeIndex to the specified freq. DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. Examples -------- >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), ... periods=3, freq='s') >>> rng.strftime('%%B %%d, %%Y, %%r') Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM', 'March 10, 2018, 09:00:02 AM'], dtype='object') """
from pandas import Index return Index(self._format_native_types(date_format=date_format))
<SYSTEM_TASK:> Repeat elements of an array. <END_TASK> <USER_TASK:> Description: def repeat(self, repeats, *args, **kwargs): """ Repeat elements of an array. See Also -------- numpy.ndarray.repeat """
nv.validate_repeat(args, kwargs) values = self._data.repeat(repeats) return type(self)(values.view('i8'), dtype=self.dtype)
<SYSTEM_TASK:> Add a timedelta-like, Tick or TimedeltaIndex-like object <END_TASK> <USER_TASK:> Description: def _add_delta(self, other): """ Add a timedelta-like, Tick or TimedeltaIndex-like object to self, yielding an int64 numpy array. Parameters ---------- other : {timedelta, np.timedelta64, Tick, TimedeltaIndex, ndarray[timedelta64]} Returns ------- result : ndarray[int64] Notes ----- The result's name is set outside of _add_delta by the calling method (__add__ or __sub__), if necessary (i.e. for Indexes). """
if isinstance(other, (Tick, timedelta, np.timedelta64)): new_values = self._add_timedeltalike_scalar(other) elif is_timedelta64_dtype(other): # ndarray[timedelta64] or TimedeltaArray/index new_values = self._add_delta_tdi(other) return new_values
<SYSTEM_TASK:> Add a delta of a timedeltalike <END_TASK> <USER_TASK:> Description: def _add_timedeltalike_scalar(self, other): """ Add a delta of a timedeltalike scalar; return the i8 result view. """
if isna(other): # i.e np.timedelta64("NaT"), not recognized by delta_to_nanoseconds new_values = np.empty(len(self), dtype='i8') new_values[:] = iNaT return new_values inc = delta_to_nanoseconds(other) new_values = checked_add_with_arr(self.asi8, inc, arr_mask=self._isnan).view('i8') new_values = self._maybe_mask_results(new_values) return new_values.view('i8')
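Seen through the public API, the scalar path above is what makes a Timedelta shift every element while leaving missing entries alone (made-up data):
import pandas as pd

dti = pd.DatetimeIndex(['2020-01-01', 'NaT', '2020-01-03'])
print(dti + pd.Timedelta('12H'))
# DatetimeIndex(['2020-01-01 12:00:00', 'NaT', '2020-01-03 12:00:00'], ...)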
<SYSTEM_TASK:> Add or subtract array-like of integers equivalent to applying <END_TASK> <USER_TASK:> Description: def _addsub_int_array(self, other, op): """ Add or subtract array-like of integers equivalent to applying `_time_shift` pointwise. Parameters ---------- other : Index, ExtensionArray, np.ndarray integer-dtype op : {operator.add, operator.sub} Returns ------- result : same class as self """
# _addsub_int_array is overridden by PeriodArray assert not is_period_dtype(self) assert op in [operator.add, operator.sub] if self.freq is None: # GH#19123 raise NullFrequencyError("Cannot shift with no freq") elif isinstance(self.freq, Tick): # easy case where we can convert to timedelta64 operation td = Timedelta(self.freq) return op(self, td * other) # We should only get here with DatetimeIndex; dispatch # to _addsub_offset_array assert not is_timedelta64_dtype(self) return op(self, np.array(other) * self.freq)
<SYSTEM_TASK:> Add or subtract array-like of DateOffset objects <END_TASK> <USER_TASK:> Description: def _addsub_offset_array(self, other, op): """ Add or subtract array-like of DateOffset objects Parameters ---------- other : Index, np.ndarray object-dtype containing pd.DateOffset objects op : {operator.add, operator.sub} Returns ------- result : same class as self """
assert op in [operator.add, operator.sub] if len(other) == 1: return op(self, other[0]) warnings.warn("Adding/subtracting array of DateOffsets to " "{cls} not vectorized" .format(cls=type(self).__name__), PerformanceWarning) # For EA self.astype('O') returns a numpy array, not an Index left = lib.values_from_object(self.astype('O')) res_values = op(left, np.array(other)) kwargs = {} if not is_period_dtype(self): kwargs['freq'] = 'infer' return self._from_sequence(res_values, **kwargs)
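A hedged sketch of the array-of-offsets path: each element gets its own offset applied in Python space, and pandas emits the PerformanceWarning raised above (made-up data; exact output elided).
import warnings
import numpy as np
import pandas as pd

dti = pd.date_range('2020-01-30', periods=2, freq='D')
offsets = np.array([pd.offsets.Day(1), pd.offsets.MonthEnd()])

with warnings.catch_warnings():
    warnings.simplefilter('ignore')  # silence the PerformanceWarning for the demo
    print(dti + offsets)  # each timestamp shifted by its corresponding offset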
<SYSTEM_TASK:> Ensure that we are re-localized. <END_TASK> <USER_TASK:> Description: def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise', from_utc=False): """ Ensure that we are re-localized. This is for compat as we can then call this on all datetimelike arrays generally (ignored for Period/Timedelta) Parameters ---------- arg : Union[DatetimeLikeArray, DatetimeIndexOpsMixin, ndarray] ambiguous : str, bool, or bool-ndarray, default 'raise' nonexistent : str, default 'raise' from_utc : bool, default False If True, localize the i8 ndarray to UTC first before converting to the appropriate tz. If False, localize directly to the tz. Returns ------- localized array """
# reconvert to local tz tz = getattr(self, 'tz', None) if tz is not None: if not isinstance(arg, type(self)): arg = self._simple_new(arg) if from_utc: arg = arg.tz_localize('UTC').tz_convert(self.tz) else: arg = arg.tz_localize( self.tz, ambiguous=ambiguous, nonexistent=nonexistent ) return arg
<SYSTEM_TASK:> Return the minimum value of the Array or minimum along <END_TASK> <USER_TASK:> Description: def min(self, axis=None, skipna=True, *args, **kwargs): """ Return the minimum value of the Array or minimum along an axis. See Also -------- numpy.ndarray.min Index.min : Return the minimum value in an Index. Series.min : Return the minimum value in a Series. """
nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna()) if isna(result): # Period._from_ordinal does not handle np.nan gracefully return NaT return self._box_func(result)
<SYSTEM_TASK:> Return the maximum value of the Array or maximum along <END_TASK> <USER_TASK:> Description: def max(self, axis=None, skipna=True, *args, **kwargs): """ Return the maximum value of the Array or maximum along an axis. See Also -------- numpy.ndarray.max Index.max : Return the maximum value in an Index. Series.max : Return the maximum value in a Series. """
# TODO: skipna is broken with max. # See https://github.com/pandas-dev/pandas/issues/24265 nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) mask = self.isna() if skipna: values = self[~mask].asi8 elif mask.any(): return NaT else: values = self.asi8 if not len(values): # short-circuit for empty max / min return NaT result = nanops.nanmax(values, skipna=skipna) # Don't have to worry about NA `result`, since no NA went in. return self._box_func(result)
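Public-facing behavior of the reduction above, with made-up data: missing values (NaT) are skipped by default.
import pandas as pd

dti = pd.DatetimeIndex(['2020-01-03', 'NaT', '2020-01-01'])
print(dti.max())  # Timestamp('2020-01-03 00:00:00')
print(dti.min())  # Timestamp('2020-01-01 00:00:00')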
<SYSTEM_TASK:> Wrap comparison operations to convert Period-like to PeriodDtype <END_TASK> <USER_TASK:> Description: def _period_array_cmp(cls, op): """ Wrap comparison operations to convert Period-like to PeriodDtype """
opname = '__{name}__'.format(name=op.__name__) nat_result = opname == '__ne__' def wrapper(self, other): op = getattr(self.asi8, opname) if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)): return NotImplemented if is_list_like(other) and len(other) != len(self): raise ValueError("Lengths must match") if isinstance(other, Period): self._check_compatible_with(other) result = op(other.ordinal) elif isinstance(other, cls): self._check_compatible_with(other) result = op(other.asi8) mask = self._isnan | other._isnan if mask.any(): result[mask] = nat_result return result elif other is NaT: result = np.empty(len(self.asi8), dtype=bool) result.fill(nat_result) else: other = Period(other, freq=self.freq) result = op(other.ordinal) if self._hasnans: result[self._isnan] = nat_result return result return compat.set_function_name(wrapper, opname, cls)
<SYSTEM_TASK:> Helper function to render a consistent error message when raising <END_TASK> <USER_TASK:> Description: def _raise_on_incompatible(left, right): """ Helper function to render a consistent error message when raising IncompatibleFrequency. Parameters ---------- left : PeriodArray right : DateOffset, Period, ndarray, or timedelta-like Raises ------ IncompatibleFrequency """
# GH#24283 error message format depends on whether right is scalar if isinstance(right, np.ndarray): other_freq = None elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period, DateOffset)): other_freq = right.freqstr else: other_freq = _delta_to_tick(Timedelta(right)).freqstr msg = DIFFERENT_FREQ.format(cls=type(left).__name__, own_freq=left.freqstr, other_freq=other_freq) raise IncompatibleFrequency(msg)
<SYSTEM_TASK:> Construct a new PeriodArray from a sequence of Period scalars. <END_TASK> <USER_TASK:> Description: def period_array( data: Sequence[Optional[Period]], freq: Optional[Tick] = None, copy: bool = False, ) -> PeriodArray: """ Construct a new PeriodArray from a sequence of Period scalars. Parameters ---------- data : Sequence of Period objects A sequence of Period objects. These are required to all have the same ``freq.`` Missing values can be indicated by ``None`` or ``pandas.NaT``. freq : str, Tick, or Offset The frequency of every element of the array. This can be specified to avoid inferring the `freq` from `data`. copy : bool, default False Whether to ensure a copy of the data is made. Returns ------- PeriodArray See Also -------- PeriodArray pandas.PeriodIndex Examples -------- >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A')]) <PeriodArray> ['2017', '2018'] Length: 2, dtype: period[A-DEC] >>> period_array([pd.Period('2017', freq='A'), ... pd.Period('2018', freq='A'), ... pd.NaT]) <PeriodArray> ['2017', '2018', 'NaT'] Length: 3, dtype: period[A-DEC] Integers that look like years are handled >>> period_array([2000, 2001, 2002], freq='D') ['2000-01-01', '2001-01-01', '2002-01-01'] Length: 3, dtype: period[D] Datetime-like strings may also be passed >>> period_array(['2000-Q1', '2000-Q2', '2000-Q3', '2000-Q4'], freq='Q') <PeriodArray> ['2000Q1', '2000Q2', '2000Q3', '2000Q4'] Length: 4, dtype: period[Q-DEC] """
if is_datetime64_dtype(data): return PeriodArray._from_datetime64(data, freq) if isinstance(data, (ABCPeriodIndex, ABCSeries, PeriodArray)): return PeriodArray(data, freq) # other iterable of some kind if not isinstance(data, (np.ndarray, list, tuple)): data = list(data) data = np.asarray(data) if freq: dtype = PeriodDtype(freq) else: dtype = None if is_float_dtype(data) and len(data) > 0: raise TypeError("PeriodIndex does not allow " "floating point in construction") data = ensure_object(data) return PeriodArray._from_sequence(data, dtype=dtype)
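A sketch of roughly equivalent constructions through public entry points (made-up data; pd.array with a period dtype is another public route to a PeriodArray):
import pandas as pd

pi = pd.PeriodIndex(['2000Q1', '2000Q2', '2000Q3'], freq='Q')
print(pi)

arr = pd.array(['2017', '2018', None], dtype='period[A-DEC]')
print(arr)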
<SYSTEM_TASK:> If both a dtype and a freq are available, ensure they match. If only <END_TASK> <USER_TASK:> Description: def validate_dtype_freq(dtype, freq): """ If both a dtype and a freq are available, ensure they match. If only dtype is available, extract the implied freq. Parameters ---------- dtype : dtype freq : DateOffset or None Returns ------- freq : DateOffset Raises ------ ValueError : non-period dtype IncompatibleFrequency : mismatch between dtype and freq """
if freq is not None: freq = frequencies.to_offset(freq) if dtype is not None: dtype = pandas_dtype(dtype) if not is_period_dtype(dtype): raise ValueError('dtype must be PeriodDtype') if freq is None: freq = dtype.freq elif freq != dtype.freq: raise IncompatibleFrequency('specified freq and dtype ' 'are different') return freq
<SYSTEM_TASK:> Convert a datetime-like array of values to Period ordinals. <END_TASK> <USER_TASK:> Description: def dt64arr_to_periodarr(data, freq, tz=None): """ Convert a datetime-like array of values to Period ordinals. Parameters ---------- data : Union[Series[datetime64[ns]], DatetimeIndex, ndarray[datetime64ns]] freq : Optional[Union[str, Tick]] Must match the `freq` on the `data` if `data` is a DatetimeIndex or Series. tz : Optional[tzinfo] Returns ------- ordinals : ndarray[int] freq : Tick The frequency extracted from the Series or DatetimeIndex if that's used. """
if data.dtype != np.dtype('M8[ns]'): raise ValueError('Wrong dtype: {dtype}'.format(dtype=data.dtype)) if freq is None: if isinstance(data, ABCIndexClass): data, freq = data._values, data.freq elif isinstance(data, ABCSeries): data, freq = data._values, data.dt.freq freq = Period._maybe_convert_freq(freq) if isinstance(data, (ABCIndexClass, ABCSeries)): data = data._values base, mult = libfrequencies.get_freq_code(freq) return libperiod.dt64arr_to_periodarr(data.view('i8'), base, tz), freq
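The public analogue of this conversion (a sketch with made-up data): datetime64 values become periods at the requested frequency.
import pandas as pd

dti = pd.date_range('2020-01-31', periods=3, freq='M')
print(dti.to_period('M'))
# PeriodIndex(['2020-01', '2020-02', '2020-03'], ...)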
<SYSTEM_TASK:> Construct a PeriodArray from a datetime64 array <END_TASK> <USER_TASK:> Description: def _from_datetime64(cls, data, freq, tz=None): """ Construct a PeriodArray from a datetime64 array Parameters ---------- data : ndarray[datetime64[ns], datetime64[ns, tz]] freq : str or Tick tz : tzinfo, optional Returns ------- PeriodArray[freq] """
data, freq = dt64arr_to_periodarr(data, freq, tz) return cls(data, freq=freq)
<SYSTEM_TASK:> actually format my specific types <END_TASK> <USER_TASK:> Description: def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): """ actually format my specific types """
values = self.astype(object) if date_format: formatter = lambda dt: dt.strftime(date_format) else: formatter = lambda dt: '%s' % dt if self._hasnans: mask = self._isnan values[mask] = na_rep imask = ~mask values[imask] = np.array([formatter(dt) for dt in values[imask]]) else: values = np.array([formatter(dt) for dt in values]) return values
<SYSTEM_TASK:> Add a timedelta-like, Tick, or TimedeltaIndex-like object <END_TASK> <USER_TASK:> Description: def _add_delta(self, other): """ Add a timedelta-like, Tick, or TimedeltaIndex-like object to self, yielding a new PeriodArray Parameters ---------- other : {timedelta, np.timedelta64, Tick, TimedeltaIndex, ndarray[timedelta64]} Returns ------- result : PeriodArray """
if not isinstance(self.freq, Tick): # We cannot add timedelta-like to non-tick PeriodArray _raise_on_incompatible(self, other) new_ordinals = super()._add_delta(other) return type(self)(new_ordinals, freq=self.freq)
<SYSTEM_TASK:> Arithmetic operations with timedelta-like scalars or array `other` <END_TASK> <USER_TASK:> Description: def _check_timedeltalike_freq_compat(self, other): """ Arithmetic operations with timedelta-like scalars or array `other` are only valid if `other` is an integer multiple of `self.freq`. If the operation is valid, find that integer multiple. Otherwise, raise because the operation is invalid. Parameters ---------- other : timedelta, np.timedelta64, Tick, ndarray[timedelta64], TimedeltaArray, TimedeltaIndex Returns ------- multiple : int or ndarray[int64] Raises ------ IncompatibleFrequency """
assert isinstance(self.freq, Tick) # checked by calling function own_offset = frequencies.to_offset(self.freq.rule_code) base_nanos = delta_to_nanoseconds(own_offset) if isinstance(other, (timedelta, np.timedelta64, Tick)): nanos = delta_to_nanoseconds(other) elif isinstance(other, np.ndarray): # numpy timedelta64 array; all entries must be compatible assert other.dtype.kind == 'm' if other.dtype != _TD_DTYPE: # i.e. non-nano unit # TODO: disallow unit-less timedelta64 other = other.astype(_TD_DTYPE) nanos = other.view('i8') else: # TimedeltaArray/Index nanos = other.asi8 if np.all(nanos % base_nanos == 0): # nanos being added is an integer multiple of the # base-frequency to self.freq delta = nanos // base_nanos # delta is the integer (or integer-array) number of periods # by which will be added to self. return delta _raise_on_incompatible(self, other)
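A sketch of the compatibility rule from the user's side (made-up data): only exact multiples of the period frequency are accepted.
import pandas as pd

pi = pd.period_range('2020-01-01', periods=3, freq='D')

print(pi + pd.Timedelta('2D'))   # shifted by two periods

try:
    pi + pd.Timedelta('36H')     # not an integer multiple of one day
except Exception as err:         # IncompatibleFrequency in pandas
    print(type(err).__name__, err)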
<SYSTEM_TASK:> Detect missing values. Treat None, NaN, INF, -INF as null. <END_TASK> <USER_TASK:> Description: def _isna_old(obj): """Detect missing values. Treat None, NaN, INF, -INF as null. Parameters ---------- obj : ndarray or object value Returns ------- boolean ndarray or boolean """
if is_scalar(obj): return libmissing.checknull_old(obj) # hack (for now) because MI registers as ndarray elif isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)): return _isna_ndarraylike_old(obj) elif isinstance(obj, ABCGeneric): return obj._constructor(obj._data.isna(func=_isna_old)) elif isinstance(obj, list): return _isna_ndarraylike_old(np.asarray(obj, dtype=object)) elif hasattr(obj, '__array__'): return _isna_ndarraylike_old(np.asarray(obj)) else: return obj is None
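The "old" behaviour is reachable through a public option; a sketch with made-up data (pd.isna switches to the INF-as-null rules when the option is set):
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, np.inf, None])
print(pd.isna(s).tolist())        # [False, True, False, True]

with pd.option_context('mode.use_inf_as_na', True):
    print(pd.isna(s).tolist())    # [False, True, True, True]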
<SYSTEM_TASK:> if we have a compatible fill_value and arr dtype, then fill <END_TASK> <USER_TASK:> Description: def _maybe_fill(arr, fill_value=np.nan): """ if we have a compatible fill_value and arr dtype, then fill """
if _isna_compat(arr, fill_value): arr.fill(fill_value) return arr
<SYSTEM_TASK:> Helper function to convert DataFrame and Series to matplotlib.table <END_TASK> <USER_TASK:> Description: def table(ax, data, rowLabels=None, colLabels=None, **kwargs): """ Helper function to convert DataFrame and Series to matplotlib.table Parameters ---------- ax : Matplotlib axes object data : DataFrame or Series data for table contents kwargs : keywords, optional keyword arguments which passed to matplotlib.table.table. If `rowLabels` or `colLabels` is not specified, data index or column name will be used. Returns ------- matplotlib table object """
if isinstance(data, ABCSeries): data = data.to_frame() elif isinstance(data, ABCDataFrame): pass else: raise ValueError('Input data must be DataFrame or Series') if rowLabels is None: rowLabels = data.index if colLabels is None: colLabels = data.columns cellText = data.values import matplotlib.table table = matplotlib.table.table(ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs) return table
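A similar entry point is exposed publicly as pandas.plotting.table; a minimal usage sketch (requires matplotlib; made-up data):
import matplotlib.pyplot as plt
import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}, index=['x', 'y'])

fig, ax = plt.subplots()
ax.axis('off')
pd.plotting.table(ax, df, loc='center')  # loc is passed through to matplotlib
plt.show()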
<SYSTEM_TASK:> Create a figure with a set of subplots already made. <END_TASK> <USER_TASK:> Description: def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True, subplot_kw=None, ax=None, layout=None, layout_type='box', **fig_kw): """Create a figure with a set of subplots already made. This utility wrapper makes it convenient to create common layouts of subplots, including the enclosing figure object, in a single call. Keyword arguments: naxes : int Number of required axes. Exceeded axes are set invisible. Default is nrows * ncols. sharex : bool If True, the X axis will be shared amongst all subplots. sharey : bool If True, the Y axis will be shared amongst all subplots. squeeze : bool If True, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects. - NxM subplots with N>1 and M>1 are returned as a 2-d array. If False, no squeezing is done: the returned axis object is always a 2-d array containing Axis instances, even if it ends up being 1x1. subplot_kw : dict Dict with keywords passed to the add_subplot() call used to create each subplot. ax : Matplotlib axis object, optional layout : tuple Number of rows and columns of the subplot grid. If not specified, calculated from naxes and layout_type. layout_type : {'box', 'horizontal', 'vertical'}, default 'box' Specify how to lay out the subplot grid. fig_kw : Other keyword arguments to be passed to the figure() call. Note that all keywords not recognized above will be automatically included here. Returns: fig, ax : tuple - fig is the Matplotlib Figure object - ax can be either a single axis object or an array of axis objects if more than one subplot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above. **Examples:** x = np.linspace(0, 2*np.pi, 400) y = np.sin(x**2) # Just a figure and one subplot f, ax = plt.subplots() ax.plot(x, y) ax.set_title('Simple plot') # Two subplots, unpack the output array immediately f, (ax1, ax2) = plt.subplots(1, 2, sharey=True) ax1.plot(x, y) ax1.set_title('Sharing Y axis') ax2.scatter(x, y) # Four polar axes plt.subplots(2, 2, subplot_kw=dict(polar=True)) """
import matplotlib.pyplot as plt if subplot_kw is None: subplot_kw = {} if ax is None: fig = plt.figure(**fig_kw) else: if is_list_like(ax): ax = _flatten(ax) if layout is not None: warnings.warn("When passing multiple axes, layout keyword is " "ignored", UserWarning) if sharex or sharey: warnings.warn("When passing multiple axes, sharex and sharey " "are ignored. These settings must be specified " "when creating axes", UserWarning, stacklevel=4) if len(ax) == naxes: fig = ax[0].get_figure() return fig, ax else: raise ValueError("The number of passed axes must be {0}, the " "same as the output plot".format(naxes)) fig = ax.get_figure() # if ax is passed and a number of subplots is 1, return ax as it is if naxes == 1: if squeeze: return fig, ax else: return fig, _flatten(ax) else: warnings.warn("To output multiple subplots, the figure containing " "the passed axes is being cleared", UserWarning, stacklevel=4) fig.clear() nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type) nplots = nrows * ncols # Create empty object array to hold all axes. It's easiest to make it 1-d # so we can just append subplots upon creation, and then axarr = np.empty(nplots, dtype=object) # Create first subplot separately, so we can share it if requested ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) if sharex: subplot_kw['sharex'] = ax0 if sharey: subplot_kw['sharey'] = ax0 axarr[0] = ax0 # Note off-by-one counting because add_subplot uses the MATLAB 1-based # convention. for i in range(1, nplots): kwds = subplot_kw.copy() # Set sharex and sharey to None for blank/dummy axes, these can # interfere with proper axis limits on the visible axes if # they share axes e.g. issue #7528 if i >= naxes: kwds['sharex'] = None kwds['sharey'] = None ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) axarr[i] = ax if naxes != nplots: for ax in axarr[naxes:]: ax.set_visible(False) _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) if squeeze: # Reshape the array to have the final desired dimension (nrow,ncol), # though discarding unneeded dimensions that equal 1. If we only have # one subplot, just return it instead of a 1-element array. if nplots == 1: axes = axarr[0] else: axes = axarr.reshape(nrows, ncols).squeeze() else: # returned axis array will be always 2-d, even if nrows=ncols=1 axes = axarr.reshape(nrows, ncols) return fig, axes
<SYSTEM_TASK:> Render tempita templates before calling cythonize <END_TASK> <USER_TASK:> Description: def maybe_cythonize(extensions, *args, **kwargs): """ Render tempita templates before calling cythonize """
if len(sys.argv) > 1 and 'clean' in sys.argv: # Avoid running cythonize on `python setup.py clean` # See https://github.com/cython/cython/issues/1495 return extensions if not cython: # Avoid trying to look up numpy when installing from sdist # https://github.com/pandas-dev/pandas/issues/25193 # TODO: See if this can be removed after pyproject.toml added. return extensions numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') # TODO: Is this really necessary here? for ext in extensions: if (hasattr(ext, 'include_dirs') and numpy_incl not in ext.include_dirs): ext.include_dirs.append(numpy_incl) build_ext.render_templates(_pxifiles) return cythonize(extensions, *args, **kwargs)
<SYSTEM_TASK:> Fast transform path for aggregations <END_TASK> <USER_TASK:> Description: def _transform_fast(self, result, obj, func_nm): """ Fast transform path for aggregations """
# if there were groups with no observations (Categorical only?) # try casting data to original dtype cast = self._transform_should_cast(func_nm) # for each col, reshape to the size of the original frame # by take operation ids, _, ngroup = self.grouper.group_info output = [] for i, _ in enumerate(result.columns): res = algorithms.take_1d(result.iloc[:, i].values, ids) if cast: res = self._try_cast(res, obj.iloc[:, i]) output.append(res) return DataFrame._from_arrays(output, columns=result.columns, index=obj.index)
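What the fast path enables at the public level (made-up data): each row receives its group's aggregated value, broadcast back in the original row order.
import pandas as pd

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 10]})
print(df.groupby('key')['val'].transform('mean'))
# 0     1.5
# 1     1.5
# 2    10.0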