text_prompt (string, 100 to 17.7k chars, nullable ⌀) | code_prompt (string, 7 to 9.86k chars, nullable ⌀)
---|---
<SYSTEM_TASK:>
get the data for this column
<END_TASK>
<USER_TASK:>
Description:
def get_attr(self):
""" get the data for this column """ |
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
self.set_kind() |
<SYSTEM_TASK:>
set the data for this column
<END_TASK>
<USER_TASK:>
Description:
def set_attr(self):
""" set the data for this column """ |
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
if self.dtype is not None:
setattr(self.attrs, self.dtype_attr, self.dtype) |
<SYSTEM_TASK:>
compute and set our version
<END_TASK>
<USER_TASK:>
Description:
def set_version(self):
""" compute and set our version """ |
version = _ensure_decoded(
getattr(self.group._v_attrs, 'pandas_version', None))
try:
self.version = tuple(int(x) for x in version.split('.'))
if len(self.version) == 2:
self.version = self.version + (0,)
except AttributeError:
self.version = (0, 0, 0) |
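A minimal standalone sketch of the version-padding logic above (the helper name is hypothetical): a two-part 'major.minor' string is padded to a three-part tuple, and a missing attribute (None) falls back to (0, 0, 0).

```python
def parse_pandas_version(version):
    # version is the decoded 'pandas_version' group attribute, or None
    try:
        parts = tuple(int(x) for x in version.split('.'))
        if len(parts) == 2:
            parts = parts + (0,)   # pad 'x.y' to (x, y, 0)
        return parts
    except AttributeError:
        # version was None: no attribute stored on the group
        return (0, 0, 0)

parse_pandas_version('0.15')  # (0, 15, 0)
parse_pandas_version(None)    # (0, 0, 0)
```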
<SYSTEM_TASK:>
infer the axes of my storer
<END_TASK>
<USER_TASK:>
Description:
def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """ |
s = self.storable
if s is None:
return False
self.get_attrs()
return True |
<SYSTEM_TASK:>
remove table keywords from kwargs and return
<END_TASK>
<USER_TASK:>
Description:
def validate_read(self, kwargs):
"""
remove table keywords from kwargs and return
raise if any keywords are passed which are not-None
""" |
kwargs = copy.copy(kwargs)
columns = kwargs.pop('columns', None)
if columns is not None:
raise TypeError("cannot pass a column specification when reading "
"a Fixed format store. this store must be "
"selected in its entirety")
where = kwargs.pop('where', None)
if where is not None:
raise TypeError("cannot pass a where specification when reading "
"from a Fixed format store. this store must be "
"selected in its entirety")
return kwargs |
<SYSTEM_TASK:>
set our object attributes
<END_TASK>
<USER_TASK:>
Description:
def set_attrs(self):
""" set our object attributes """ |
self.attrs.encoding = self.encoding
self.attrs.errors = self.errors |
<SYSTEM_TASK:>
read an array for the specified node (off of the group)
<END_TASK>
<USER_TASK:>
Description:
def read_array(self, key, start=None, stop=None):
""" read an array for the specified node (off of group """ |
import tables
node = getattr(self.group, key)
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = node[0][start:stop]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = node[start:stop]
if dtype == 'datetime64':
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
elif dtype == 'timedelta64':
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
return ret.T
else:
return ret |
<SYSTEM_TASK:>
write a 0-len array
<END_TASK>
<USER_TASK:>
Description:
def write_array_empty(self, key, value):
""" write a 0-len array """ |
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape |
<SYSTEM_TASK:>
we don't support start, stop kwds in Sparse
<END_TASK>
<USER_TASK:>
Description:
def validate_read(self, kwargs):
"""
we don't support start, stop kwds in Sparse
""" |
kwargs = super().validate_read(kwargs)
if 'start' in kwargs or 'stop' in kwargs:
raise NotImplementedError("start and/or stop are not supported "
"in fixed Sparse reading")
return kwargs |
<SYSTEM_TASK:>
write it as a collection of individual sparse series
<END_TASK>
<USER_TASK:>
Description:
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """ |
super().write(obj, **kwargs)
for name, ss in obj.items():
key = 'sparse_series_{name}'.format(name=name)
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns) |
<SYSTEM_TASK:>
validate against an existing table
<END_TASK>
<USER_TASK:>
Description:
def validate(self, other):
""" validate against an existing table """ |
if other is None:
return
if other.table_type != self.table_type:
raise TypeError(
"incompatible table_type with existing "
"[{other} - {self}]".format(
other=other.table_type, self=self.table_type))
for c in ['index_axes', 'non_index_axes', 'values_axes']:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
"invalid combinate of [{c}] on appending data "
"[{sax}] vs current table [{oax}]".format(
c=c, sax=sax, oax=oax))
# should never get here
raise Exception(
"invalid combinate of [{c}] on appending data [{sv}] vs "
"current table [{ov}]".format(c=c, sv=sv, ov=ov)) |
<SYSTEM_TASK:>
validate that we can store the multi-index; reset and return the
<END_TASK>
<USER_TASK:>
Description:
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
""" |
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table") |
<SYSTEM_TASK:>
based on our axes, compute the expected nrows
<END_TASK>
<USER_TASK:>
Description:
def nrows_expected(self):
""" based on our axes, compute the expected nrows """ |
return np.prod([i.cvalues.shape[0] for i in self.index_axes]) |
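Since the stored table enumerates the cartesian product of its index axes, the expected row count is the product of their lengths; a tiny sketch with made-up lengths:

```python
import numpy as np

# two index axes of lengths 3 and 4 -> one stored row per combination
index_lengths = [3, 4]
nrows_expected = np.prod(index_lengths)  # 12
```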
<SYSTEM_TASK:>
return a tuple of my permuted axes, non_indexable at the front
<END_TASK>
<USER_TASK:>
Description:
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front""" |
return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes])) |
<SYSTEM_TASK:>
return a dict of the kinds allowable columns for this object
<END_TASK>
<USER_TASK:>
Description:
def queryables(self):
""" return a dict of the kinds allowable columns for this object """ |
# compute the values_axes queryables
return dict(
[(a.cname, a) for a in self.index_axes] +
[(self.storage_obj_type._AXIS_NAMES[axis], None)
for axis, values in self.non_index_axes] +
[(v.cname, v) for v in self.values_axes
if v.name in set(self.data_columns)]
) |
<SYSTEM_TASK:>
return the metadata pathname for this key
<END_TASK>
<USER_TASK:>
Description:
def _get_metadata_path(self, key):
""" return the metadata pathname for this key """ |
return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
key=key) |
<SYSTEM_TASK:>
write out a metadata array to the key as a fixed-format Series
<END_TASK>
<USER_TASK:>
Description:
def write_metadata(self, key, values):
"""
write out a metadata array to the key as a fixed-format Series
Parameters
----------
key : string
values : ndarray
""" |
values = Series(values)
self.parent.put(self._get_metadata_path(key), values, format='table',
encoding=self.encoding, errors=self.errors,
nan_rep=self.nan_rep) |
<SYSTEM_TASK:>
return the metadata array for this key
<END_TASK>
<USER_TASK:>
Description:
def read_metadata(self, key):
""" return the meta data array for this key """ |
if getattr(getattr(self.group, 'meta', None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None |
<SYSTEM_TASK:>
are we trying to operate on an old version?
<END_TASK>
<USER_TASK:>
Description:
def validate_version(self, where=None):
""" are we trying to operate on an old version? """ |
if where is not None:
if (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1):
ws = incompatibility_doc % '.'.join(
[str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning) |
<SYSTEM_TASK:>
validate the min_itemsize doesn't contain items that are not in the
<END_TASK>
<USER_TASK:>
Description:
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemisze doesn't contain items that are not in the
axes this needs data_columns to be defined
""" |
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == 'values':
continue
if k not in q:
raise ValueError(
"min_itemsize has the key [{key}] which is not an axis or "
"data_column".format(key=k)) |
<SYSTEM_TASK:>
take the input data_columns and min_itemsize and create a data
<END_TASK>
<USER_TASK:>
Description:
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
""" |
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels] |
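A sketch of the spec-building rules above, with hypothetical labels: data_columns=True expands to all axis labels, min_itemsize keys other than 'values' are appended, and only labels actually on the axis survive, in axis order.

```python
axis_labels = ['A', 'B', 'C']
data_columns = True                             # True -> use every axis label
min_itemsize = {'values': 50, 'B': 20, 'D': 20}

if data_columns is True:
    data_columns = list(axis_labels)

existing = set(data_columns)
data_columns.extend(k for k in min_itemsize
                    if k != 'values' and k not in existing)

# 'D' is dropped because it is not an axis label
valid = [c for c in data_columns if c in axis_labels]  # ['A', 'B', 'C']
```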
<SYSTEM_TASK:>
process axes filters
<END_TASK>
<USER_TASK:>
Description:
def process_axes(self, obj, columns=None):
""" process axes filters """ |
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
# this might be the name of a field IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = ensure_index(getattr(obj, field).values)
filt = ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
raise ValueError("cannot find the field [{field}] for "
"filtering!".format(field=field))
obj = process_filter(field, filt)
return obj |
<SYSTEM_TASK:>
create the description of the table from the axes & values
<END_TASK>
<USER_TASK:>
Description:
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """ |
# use the provided expectedrows, else fall back to a default
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = {a.cname: a.typ for a in self.axes}
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d |
<SYSTEM_TASK:>
return a single column from the table, generally only indexables
<END_TASK>
<USER_TASK:>
Description:
def read_column(self, column, where=None, start=None, stop=None):
"""return a single column from the table, generally only indexables
are interesting
""" |
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [{column}] can not be extracted individually; "
"it is not data indexable".format(column=column))
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding,
errors=self.errors
).take_data(),
a.tz, True), name=column)
raise KeyError(
"column [{column}] not found in the table".format(column=column)) |
<SYSTEM_TASK:>
we have n indexable columns, with an arbitrary number of data
<END_TASK>
<USER_TASK:>
Description:
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
""" |
if not self.read_axes(where=where, **kwargs):
return None
raise NotImplementedError("Panel is removed in pandas 0.25.0") |
<SYSTEM_TASK:>
we form the data into a 2-d including indexes, values, mask
<END_TASK>
<USER_TASK:>
Description:
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """ |
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isna(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype('u1', copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues]) |
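The tile/repeat broadcast above enumerates the cartesian product of the index axes row by row; a standalone numpy sketch for two axes:

```python
import numpy as np

outer = np.array([10, 20])    # index axis 0
inner = np.array([1, 2, 3])   # index axis 1

# an axis is repeated over the length of every later axis,
# and tiled over the length of every earlier axis
b_outer = np.repeat(outer, len(inner))  # [10 10 10 20 20 20]
b_inner = np.tile(inner, len(outer))    # [ 1  2  3  1  2  3]
# zip(b_outer, b_inner) walks (10,1), (10,2), ..., (20,3)
```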
<SYSTEM_TASK:>
create the indexables from the table description
<END_TASK>
<USER_TASK:>
Description:
def indexables(self):
""" create the indexables from the table description """ |
if self._indexables is None:
d = self.description
# the index columns is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables |
<SYSTEM_TASK:>
Cast to a NumPy array with 'dtype'.
<END_TASK>
<USER_TASK:>
Description:
def astype(self, dtype, copy=True):
"""
Cast to a NumPy array with 'dtype'.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
copy : bool, default True
Whether to copy the data, even if not necessary. If False,
a copy is made only if the old dtype does not match the
new dtype.
Returns
-------
array : ndarray
NumPy ndarray with 'dtype' for its dtype.
""" |
return np.array(self, dtype=dtype, copy=copy) |
<SYSTEM_TASK:>
Return the indices that would sort this array.
<END_TASK>
<USER_TASK:>
Description:
def argsort(self, ascending=True, kind='quicksort', *args, **kwargs):
"""
Return the indices that would sort this array.
Parameters
----------
ascending : bool, default True
Whether the indices should result in an ascending
or descending sort.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
*args, **kwargs:
passed through to :func:`numpy.argsort`.
Returns
-------
index_array : ndarray
Array of indices that sort ``self``.
See Also
--------
numpy.argsort : Sorting implementation used internally.
""" |
# Implementor note: You have two places to override the behavior of
# argsort.
# 1. _values_for_argsort : construct the values passed to np.argsort
# 2. argsort : total control over sorting.
ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs)
values = self._values_for_argsort()
result = np.argsort(values, kind=kind, **kwargs)
if not ascending:
result = result[::-1]
return result |
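As the implementation above shows, the descending order is just the reversed ascending result; a quick numpy-only illustration:

```python
import numpy as np

values = np.array([30, 10, 20])
ascending = np.argsort(values)   # array([1, 2, 0])
descending = ascending[::-1]     # array([0, 2, 1])
```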
<SYSTEM_TASK:>
Shift values by desired number.
<END_TASK>
<USER_TASK:>
Description:
def shift(
self,
periods: int = 1,
fill_value: object = None,
) -> ABCExtensionArray:
"""
Shift values by desired number.
Newly introduced missing values are filled with
``self.dtype.na_value``.
.. versionadded:: 0.24.0
Parameters
----------
periods : int, default 1
The number of periods to shift. Negative values are allowed
for shifting backwards.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default is ``self.dtype.na_value``
.. versionadded:: 0.24.0
Returns
-------
shifted : ExtensionArray
Notes
-----
If ``self`` is empty or ``periods`` is 0, a copy of ``self`` is
returned.
If ``periods > len(self)``, then an array of size
len(self) is returned, with all values filled with
``self.dtype.na_value``.
""" |
# Note: this implementation assumes that `self.dtype.na_value` can be
# stored in an instance of your ExtensionArray with `self.dtype`.
if not len(self) or periods == 0:
return self.copy()
if isna(fill_value):
fill_value = self.dtype.na_value
empty = self._from_sequence(
[fill_value] * min(abs(periods), len(self)),
dtype=self.dtype
)
if periods > 0:
a = empty
b = self[:-periods]
else:
a = self[abs(periods):]
b = empty
return self._concat_same_type([a, b]) |
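A pure-Python sketch of the concatenation above for a positive shift: a block of NA fill values goes in front and the tail of the data is truncated so the length is preserved (NA shown as None here purely for illustration).

```python
data = [1, 2, 3, 4]
periods, na = 2, None

empty = [na] * min(abs(periods), len(data))
shifted = empty + data[:-periods]   # [None, None, 1, 2]
```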
<SYSTEM_TASK:>
Return an array and missing value suitable for factorization.
<END_TASK>
<USER_TASK:>
Description:
def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
"""
Return an array and missing value suitable for factorization.
Returns
-------
values : ndarray
An array suitable for factorization. This should maintain order
and be a supported dtype (Float64, Int64, UInt64, String, Object).
By default, the extension array is cast to object dtype.
na_value : object
The value in `values` to consider missing. This will be treated
as NA in the factorization routines, so it will be coded as
`na_sentinel` and not included in `uniques`. By default,
``np.nan`` is used.
Notes
-----
The values returned by this method are also used in
:func:`pandas.util.hash_pandas_object`.
""" |
return self.astype(object), np.nan |
<SYSTEM_TASK:>
Encode the extension array as an enumerated type.
<END_TASK>
<USER_TASK:>
Description:
def factorize(
self,
na_sentinel: int = -1,
) -> Tuple[np.ndarray, ABCExtensionArray]:
"""
Encode the extension array as an enumerated type.
Parameters
----------
na_sentinel : int, default -1
Value to use in the `labels` array to indicate missing values.
Returns
-------
labels : ndarray
An integer NumPy array that's an indexer into the original
ExtensionArray.
uniques : ExtensionArray
An ExtensionArray containing the unique values of `self`.
.. note::
uniques will *not* contain an entry for the NA value of
the ExtensionArray if there are any missing values present
in `self`.
See Also
--------
pandas.factorize : Top-level factorize method that dispatches here.
Notes
-----
:meth:`pandas.factorize` offers a `sort` keyword as well.
""" |
# Implementor note: There are two ways to override the behavior of
# pandas.factorize
# 1. _values_for_factorize and _from_factorize.
# Specify the values passed to pandas' internal factorization
# routines, and how to convert from those values back to the
# original ExtensionArray.
# 2. ExtensionArray.factorize.
# Complete control over factorization.
from pandas.core.algorithms import _factorize_array
arr, na_value = self._values_for_factorize()
labels, uniques = _factorize_array(arr, na_sentinel=na_sentinel,
na_value=na_value)
uniques = self._from_factorized(uniques, self)
return labels, uniques |
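A usage sketch with a concrete extension array (the nullable Int64 dtype, available since pandas 0.24): missing values are coded with na_sentinel and excluded from uniques.

```python
import pandas as pd

arr = pd.array([1, 2, 1, None], dtype="Int64")
labels, uniques = arr.factorize()
# labels  -> array([ 0,  1,  0, -1])   (-1 is the default na_sentinel)
# uniques -> IntegerArray [1, 2]
```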
<SYSTEM_TASK:>
Formatting function for scalar values.
<END_TASK>
<USER_TASK:>
Description:
def _formatter(
self,
boxed: bool = False,
) -> Callable[[Any], Optional[str]]:
"""Formatting function for scalar values.
This is used in the default '__repr__'. The returned formatting
function receives instances of your scalar type.
Parameters
----------
boxed: bool, default False
An indicator of whether or not your array is being printed
within a Series, DataFrame, or Index (True), or just by
itself (False). This may be useful if you want scalar values
to appear differently within a Series versus on its own (e.g.
quoted or not).
Returns
-------
Callable[[Any], str]
A callable that gets instances of the scalar type and
returns a string. By default, :func:`repr` is used
when ``boxed=False`` and :func:`str` is used when
``boxed=True``.
""" |
if boxed:
return str
return repr |
<SYSTEM_TASK:>
Return a scalar result of performing the reduction operation.
<END_TASK>
<USER_TASK:>
Description:
def _reduce(self, name, skipna=True, **kwargs):
"""
Return a scalar result of performing the reduction operation.
Parameters
----------
name : str
Name of the function, supported values are:
{ any, all, min, max, sum, mean, median, prod,
std, var, sem, kurt, skew }.
skipna : bool, default True
If True, skip NaN values.
**kwargs
Additional keyword arguments passed to the reduction function.
Currently, `ddof` is the only supported kwarg.
Returns
-------
scalar
Raises
------
TypeError : subclass does not define reductions
""" |
raise TypeError("cannot perform {name} with type {dtype}".format(
name=name, dtype=self.dtype)) |
<SYSTEM_TASK:>
A class method that returns a method that will correspond to an
<END_TASK>
<USER_TASK:>
Description:
def _create_method(cls, op, coerce_to_dtype=True):
"""
A class method that returns a method that will correspond to an
operator for an ExtensionArray subclass, by dispatching to the
relevant operator defined on the individual elements of the
ExtensionArray.
Parameters
----------
op : function
An operator that takes arguments op(a, b)
coerce_to_dtype : bool, default True
boolean indicating whether to attempt to convert
the result to the underlying ExtensionArray dtype.
If it's not possible to create a new ExtensionArray with the
values, an ndarray is returned instead.
Returns
-------
Callable[[Any, Any], Union[ndarray, ExtensionArray]]
A method that can be bound to a class. When used, the method
receives the two arguments, one of which is the instance of
this class, and should return an ExtensionArray or an ndarray.
Returning an ndarray may be necessary when the result of the
`op` cannot be stored in the ExtensionArray. The dtype of the
ndarray uses NumPy's normal inference rules.
Example
-------
Given an ExtensionArray subclass called MyExtensionArray, use
>>> __add__ = cls._create_method(operator.add)
in the class definition of MyExtensionArray to create the operator
for addition, that will be based on the operator implementation
of the underlying elements of the ExtensionArray
""" |
def _binop(self, other):
def convert_values(param):
if isinstance(param, ExtensionArray) or is_list_like(param):
ovalues = param
else:  # assume it's a scalar and broadcast it
ovalues = [param] * len(self)
return ovalues
if isinstance(other, (ABCSeries, ABCIndexClass)):
# rely on pandas to unbox and dispatch to us
return NotImplemented
lvalues = self
rvalues = convert_values(other)
# If the operator is not defined for the underlying objects,
# a TypeError should be raised
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
def _maybe_convert(arr):
if coerce_to_dtype:
# https://github.com/pandas-dev/pandas/issues/22850
# We catch all regular exceptions here, and fall back
# to an ndarray.
try:
res = self._from_sequence(arr)
except Exception:
res = np.asarray(arr)
else:
res = np.asarray(arr)
return res
if op.__name__ in {'divmod', 'rdivmod'}:
a, b = zip(*res)
res = _maybe_convert(a), _maybe_convert(b)
else:
res = _maybe_convert(res)
return res
op_name = ops._get_op_name(op, True)
return set_function_name(_binop, op_name, cls) |
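A stripped-down, class-free sketch of the elementwise dispatch inside _binop (without the dtype-coercion fallback), just to show the core idea:

```python
import operator
import numpy as np

def elementwise(op, left, right):
    # apply the scalar operator pairwise, as _binop does after
    # broadcasting scalars; the result is left as an ndarray here
    return np.asarray([op(a, b) for a, b in zip(left, right)])

elementwise(operator.add, [1, 2, 3], [10, 20, 30])  # array([11, 22, 33])
```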
<SYSTEM_TASK:>
Make an alias for a method of the underlying ExtensionArray.
<END_TASK>
<USER_TASK:>
Description:
def ea_passthrough(array_method):
"""
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
array_method : method on an Array class
Returns
-------
method
""" |
def method(self, *args, **kwargs):
return array_method(self._data, *args, **kwargs)
method.__name__ = array_method.__name__
method.__doc__ = array_method.__doc__
return method |
<SYSTEM_TASK:>
Create a comparison method that dispatches to ``cls.values``.
<END_TASK>
<USER_TASK:>
Description:
def _create_comparison_method(cls, op):
"""
Create a comparison method that dispatches to ``cls.values``.
""" |
def wrapper(self, other):
if isinstance(other, ABCSeries):
# the arrays defer to Series for comparison ops but the indexes
# don't, so we have to unwrap here.
other = other._values
result = op(self._data, maybe_unwrap_index(other))
return result
wrapper.__doc__ = op.__doc__
wrapper.__name__ = '__{}__'.format(op.__name__)
return wrapper |
<SYSTEM_TASK:>
Create the join wrapper methods.
<END_TASK>
<USER_TASK:>
Description:
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
"""
Create the join wrapper methods.
""" |
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
@staticmethod
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
left = left.view('i8')
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
right = right.view('i8')
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper |
<SYSTEM_TASK:>
Return the minimum value of the Index or minimum along
<END_TASK>
<USER_TASK:>
Description:
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
""" |
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value |
<SYSTEM_TASK:>
Returns the indices of the minimum values along an axis.
<END_TASK>
<USER_TASK:>
Description:
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
""" |
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin() |
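The masking trick above replaces missing slots (iNaT, the minimum int64) with the maximum int64 so they can never win the argmin; a numpy-only sketch:

```python
import numpy as np

iNaT = np.iinfo('int64').min      # sentinel used for NaT
i8 = np.array([30, iNaT, 10])

mask = i8 == iNaT
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max  # NaT loses every comparison
i8.argmin()                       # 2
```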
<SYSTEM_TASK:>
Return the maximum value of the Index or maximum along
<END_TASK>
<USER_TASK:>
Description:
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
""" |
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
if skipna:
max_stamp = self[~self._isnan].asi8.max()
else:
return self._na_value
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value |
<SYSTEM_TASK:>
Returns the indices of the maximum values along an axis.
<END_TASK>
<USER_TASK:>
Description:
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
""" |
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax() |
<SYSTEM_TASK:>
We don't allow integer or float indexing on datetime-like when using
<END_TASK>
<USER_TASK:>
Description:
def _convert_scalar_indexer(self, key, kind=None):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
""" |
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return super()._convert_scalar_indexer(key, kind=kind) |
<SYSTEM_TASK:>
Compute boolean array of whether each index value is found in the
<END_TASK>
<USER_TASK:>
Description:
def isin(self, values):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
""" |
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8) |
<SYSTEM_TASK:>
Concatenate to_concat which has the same class.
<END_TASK>
<USER_TASK:>
Description:
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
""" |
attribs = self._get_attributes_dict()
attribs['name'] = name
# do not pass tz to set because tzlocal cannot be hashed
if len({str(x.dtype) for x in to_concat}) != 1:
raise ValueError('to_concat must have the same tz')
new_data = type(self._values)._concat_same_type(to_concat).asi8
# GH 3232: If the concat result is evenly spaced, we can retain the
# original frequency
is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
if not is_period_dtype(self) and not is_diff_evenly_spaced:
# reset freq
attribs['freq'] = None
return self._simple_new(new_data, **attribs) |
<SYSTEM_TASK:>
Shift index by desired number of time frequency increments.
<END_TASK>
<USER_TASK:>
Description:
def shift(self, periods, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
""" |
result = self._data._time_shift(periods, freq=freq)
return type(self)(result, name=self.name) |
<SYSTEM_TASK:>
Replaces values in a Series using the fill method specified when no
<END_TASK>
<USER_TASK:>
Description:
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
""" |
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result |
<SYSTEM_TASK:>
passed a manager and an axes dict
<END_TASK>
<USER_TASK:>
Description:
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """ |
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(axe,
axis=self._get_block_manager_axis(a),
copy=False)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr |
<SYSTEM_TASK:>
Provide axes setup for the major PandasObjects.
<END_TASK>
<USER_TASK:>
Description:
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None, docs=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis : the axis of the selector dimension (int)
stat_axis : the axis number for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
""" |
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
assert not isinstance(ns, dict) |
<SYSTEM_TASK:>
Return an axes dictionary for the passed axes.
<END_TASK>
<USER_TASK:>
Description:
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes.""" |
d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d |
<SYSTEM_TASK:>
Map the axis to the block_manager axis.
<END_TASK>
<USER_TASK:>
Description:
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis.""" |
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis |
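For a DataFrame the axes are reversed relative to its BlockManager, so user axis 0 (the index) maps to block-manager axis 1 and vice versa; a minimal sketch of that mapping under a DataFrame-like setup:

```python
AXIS_LEN, AXIS_REVERSED = 2, True   # DataFrame-like configuration

def block_manager_axis(axis):
    if AXIS_REVERSED:
        return (AXIS_LEN - 1) - axis
    return axis

block_manager_axis(0)  # 1 (index lives on block-manager axis 1)
block_manager_axis(1)  # 0 (columns live on block-manager axis 0)
```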
<SYSTEM_TASK:>
Return the space character free column resolvers of a dataframe.
<END_TASK>
<USER_TASK:>
Description:
def _get_space_character_free_column_resolvers(self):
"""Return the space character free column resolvers of a dataframe.
Column names with spaces are 'cleaned up' so that they can be referred
to by backtick quoting.
Used in :meth:`DataFrame.eval`.
""" |
from pandas.core.computation.common import _remove_spaces_column_name
return {_remove_spaces_column_name(k): v for k, v
in self.iteritems()} |
<SYSTEM_TASK:>
Interchange axes and swap values axes appropriately.
<END_TASK>
<USER_TASK:>
Description:
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
""" |
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self) |
<SYSTEM_TASK:>
Return item and drop from frame. Raise KeyError if not found.
<END_TASK>
<USER_TASK:>
Description:
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
""" |
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result |
<SYSTEM_TASK:>
Squeeze 1 dimensional axis objects into scalars.
<END_TASK>
<USER_TASK:>
Description:
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
.. versionadded:: 0.20.0
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
""" |
axis = (self._AXIS_NAMES if axis is None else
(self._get_axis_number(axis),))
try:
return self.iloc[
tuple(0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes))]
except Exception:
return self |
<SYSTEM_TASK:>
Swap levels i and j in a MultiIndex on a particular axis
<END_TASK>
<USER_TASK:>
Description:
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
""" |
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result |
<SYSTEM_TASK:>
Set the name of the axis for the index or columns.
<END_TASK>
<USER_TASK:>
Description:
def rename_axis(self, mapper=sentinel, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
the axis *labels* by passing a mapping or scalar. This behavior is
deprecated and will be removed in a future version. Use ``rename``
instead.
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
""" |
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=sentinel)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
axis = kwargs.pop('axis', 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError('rename_axis() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
inplace = validate_bool_kwarg(inplace, 'inplace')
if (mapper is not sentinel):
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
is_dict_like(mapper))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
# Deprecated (v0.21) behavior is if mapper is specified,
# and not a list or scalar, then call rename
msg = ("Using 'rename_axis' to alter labels is deprecated. "
"Use '.rename' instead")
warnings.warn(msg, FutureWarning, stacklevel=3)
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is sentinel:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not
is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com._get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis,
inplace=True)
if not inplace:
return result |
<SYSTEM_TASK:>
Test whether two objects contain the same elements.
<END_TASK>
<USER_TASK:>
Description:
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
assert_series_equal : Return True if left and right Series are equal,
False otherwise.
assert_frame_equal : Return True if left and right DataFrames are
equal, False otherwise.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
""" |
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data) |
<SYSTEM_TASK:>
Return the bool of a single element PandasObject.
<END_TASK>
<USER_TASK:>
Description:
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
""" |
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__() |
<SYSTEM_TASK:>
Test whether a key is a level reference for a given axis.
<END_TASK>
<USER_TASK:>
Description:
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
""" |
axis = self._get_axis_number(axis)
if self.ndim > 2:
raise NotImplementedError(
"_is_level_reference is not implemented for {type}"
.format(type=type(self)))
return (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
not self._is_label_reference(key, axis=axis)) |
<SYSTEM_TASK:>
Test whether a key is a label reference for a given axis.
<END_TASK>
<USER_TASK:>
Description:
def _is_label_reference(self, key, axis=0):
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
""" |
if self.ndim > 2:
raise NotImplementedError(
"_is_label_reference is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (key is not None and
is_hashable(key) and
any(key in self.axes[ax] for ax in other_axes)) |
<SYSTEM_TASK:>
Test whether a key is a label or level reference for a given axis.
<END_TASK>
<USER_TASK:>
Description:
def _is_label_or_level_reference(self, key, axis=0):
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
""" |
if self.ndim > 2:
raise NotImplementedError(
"_is_label_or_level_reference is not implemented for {type}"
.format(type=type(self)))
return (self._is_level_reference(key, axis=axis) or
self._is_label_reference(key, axis=axis)) |
<SYSTEM_TASK:>
Check whether `key` is ambiguous.
<END_TASK>
<USER_TASK:>
Description:
def _check_label_or_level_ambiguity(self, key, axis=0):
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
""" |
if self.ndim > 2:
raise NotImplementedError(
"_check_label_or_level_ambiguity is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
any(key in self.axes[ax] for ax in other_axes)):
# Build an informative and grammatical warning
level_article, level_type = (('an', 'index')
if axis == 0 else
('a', 'column'))
label_article, label_type = (('a', 'column')
if axis == 0 else
('an', 'index'))
msg = ("'{key}' is both {level_article} {level_type} level and "
"{label_article} {label_type} label, which is ambiguous."
).format(key=key,
level_article=level_article,
level_type=level_type,
label_article=label_article,
label_type=label_type)
raise ValueError(msg) |
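A usage sketch of the ambiguous case: the name 'a' is both an index level and a column label, so name-based resolution (e.g. via groupby) raises the ValueError built above.

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 2]},
                  index=pd.Index([10, 20], name='a'))
df.groupby('a')
# ValueError: 'a' is both an index level and a column label,
# which is ambiguous.
```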
<SYSTEM_TASK:>
Return a 1-D array of values associated with `key`, a label or level
<END_TASK>
<USER_TASK:>
Description:
def _get_label_or_level_values(self, key, axis=0):
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
""" |
if self.ndim > 2:
raise NotImplementedError(
"_get_label_or_level_values is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(
self._get_axis(other_axes[0]), MultiIndex):
multi_message = ('\n'
'For a multi-index, the label must be a '
'tuple with elements corresponding to '
'each level.')
else:
multi_message = ''
label_axis_name = 'column' if axis == 0 else 'index'
raise ValueError(("The {label_axis_name} label '{key}' "
"is not unique.{multi_message}")
.format(key=key,
label_axis_name=label_axis_name,
multi_message=multi_message))
return values |
<SYSTEM_TASK:>
Indicator whether DataFrame is empty.
<END_TASK>
<USER_TASK:>
Description:
def empty(self):
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
""" |
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS) |
<SYSTEM_TASK:>
Not a real Jupyter special repr method, but we use the same
<END_TASK>
<USER_TASK:>
Description:
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
""" |
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option('display.max_rows'))
payload = json.loads(data.to_json(orient='table'),
object_pairs_hook=collections.OrderedDict)
return payload |
<SYSTEM_TASK:>
Convert the object to a JSON string.
<END_TASK>
<USER_TASK:>
Description:
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression='infer',
index=True):
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : string or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : string
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index','table'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values','table'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
.. versionadded:: 0.19.0
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
See Also
--------
read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
""" |
from pandas.io import json
if date_format is None and orient == 'table':
date_format = 'iso'
elif date_format is None:
date_format = 'epoch'
return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii, date_unit=date_unit,
default_handler=default_handler,
lines=lines, compression=compression,
index=index) |
<SYSTEM_TASK:>
Write the contained data to an HDF5 file using HDFStore.
<END_TASK>
<USER_TASK:>
Description:
def to_hdf(self, path_or_buf, key, **kwargs):
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
If true, ALL nan rows will not be written to store.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
See Also
--------
pandas.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
""" |
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs) |
<SYSTEM_TASK:>
Serialize object to input file path using msgpack format.
<END_TASK>
<USER_TASK:>
Description:
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
"""
Serialize object to input file path using msgpack format.
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
append : bool whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
""" |
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding,
**kwargs) |
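The docstring above carries no example, so here is a minimal round-trip sketch, assuming a pandas version that still ships the experimental msgpack support (it was later deprecated and removed):

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})
packed = df.to_msgpack()              # bytes, since no path was given
roundtrip = pd.read_msgpack(packed)   # read the bytes back
assert roundtrip.equals(df)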
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
""" |
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) |
<SYSTEM_TASK:>
r"""
<END_TASK>
<USER_TASK:>
Description:
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=False,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
Parameters
----------
buf : file descriptor or None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : str, optional
Format string for floating point numbers.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
.. versionadded:: 0.20.0
Returns
-------
str or None
If buf is None, returns the resulting LaTeX format as a
string. Otherwise returns None.
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE
'\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon
\\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello &
purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n'
""" |
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option(
"display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue() |
<SYSTEM_TASK:>
Create an indexer like _name in the class.
<END_TASK>
<USER_TASK:>
Description:
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class.""" |
if getattr(cls, name, None) is None:
_indexer = functools.partial(indexer, name)
setattr(cls, name, property(_indexer, doc=indexer.__doc__)) |
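A toy sketch of the same wiring, with hypothetical names (`Frame`, `_toy_indexer`): ``functools.partial`` pre-binds the indexer name, and the partial then serves as the property getter, receiving the instance on access:

import functools

def _toy_indexer(name, obj):
    """Toy getter; `name` is pre-bound by functools.partial."""
    return 'indexer {!r} on {}'.format(name, type(obj).__name__)

class Frame:
    pass

# Mirrors _create_indexer: only attach if the attribute is unset.
if getattr(Frame, 'loc', None) is None:
    getter = functools.partial(_toy_indexer, 'loc')
    setattr(Frame, 'loc', property(getter, doc=_toy_indexer.__doc__))

Frame().loc   # "indexer 'loc' on Frame"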
<SYSTEM_TASK:>
Return the cached item, item represents a label indexer.
<END_TASK>
<USER_TASK:>
Description:
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer.""" |
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res |
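One observable consequence, sketched under the assumption that single-column access routes through this cache (the behavior is version-dependent): repeated access to the same label returns the same boxed object:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
s1 = df['a']
s2 = df['a']
s1 is s2   # True: the second lookup is served from _item_cache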
<SYSTEM_TASK:>
Set the _cacher attribute on the calling object with a weakref to
<END_TASK>
<USER_TASK:>
Description:
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
""" |
self._cacher = (item, weakref.ref(cacher)) |
<SYSTEM_TASK:>
Return the cached item, item represents a positional indexer.
<END_TASK>
<USER_TASK:>
Description:
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer.""" |
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take(item, axis=self._info_axis_number)
return lower |
<SYSTEM_TASK:>
See if we need to update our parent cacher if clear, then clear our
<END_TASK>
<USER_TASK:>
Description:
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
""" |
cacher = getattr(self, '_cacher', None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except Exception:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache() |
<SYSTEM_TASK:>
Construct a slice of this container.
<END_TASK>
<USER_TASK:>
Description:
def _slice(self, slobj, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
""" |
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result |
<SYSTEM_TASK:>
Check if we are a view, have a cacher, and are of mixed type.
<END_TASK>
<USER_TASK:>
Description:
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Will return a boolean if we are a view and are cached, but of a
single dtype, meaning that the cacher should be updated following
the setting.
""" |
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t='referant',
force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t='referant')
return False |
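A hedged illustration of the chained-assignment situation this check guards against; the exact warning behavior depends on the ``mode.chained_assignment`` option:

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})

# df[df['a'] > 1] is a fresh copy, so writing into it typically
# emits SettingWithCopyWarning and leaves df itself unchanged.
df[df['a'] > 1]['b'] = 'q'

# The single .loc call writes through to df directly.
df.loc[df['a'] > 1, 'b'] = 'q'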
<SYSTEM_TASK:>
Return data corresponding to axis labels matching criteria.
<END_TASK>
<USER_TASK:>
Description:
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria.
.. deprecated:: 0.21.0
Use df.loc[df.index.map(crit)] to select via labels
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : same type as caller
""" |
warnings.warn("'select' is deprecated and will be removed in a "
"future release. You can use "
".loc[labels.map(crit)] as a replacement",
FutureWarning, stacklevel=2)
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
if len(axis_values) > 0:
new_axis = axis_values[
np.asarray([bool(crit(label)) for label in axis_values])]
else:
new_axis = axis_values
return self.reindex(**{axis_name: new_axis}) |
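The replacement spelled out in the warning, as a short sketch:

import pandas as pd

df = pd.DataFrame({'v': [1, 2, 3]}, index=['apple', 'banana', 'cherry'])
crit = lambda label: label.startswith('b')

# df.select(crit) emits a FutureWarning; the suggested equivalent:
df.loc[df.index.map(crit)]
#         v
# banana  2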
<SYSTEM_TASK:>
Return an object with matching indices as other object.
<END_TASK>
<USER_TASK:>
Description:
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
""" |
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d) |
<SYSTEM_TASK:>
Drop labels from specified axis. Used in the ``drop`` method
<END_TASK>
<USER_TASK:>
Description:
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
""" |
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == 'raise' and indexer.all():
raise KeyError('{} not found in axis'.format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == 'raise' and labels_missing:
raise KeyError('{} not found in axis'.format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result |
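A sketch of the non-unique branch above, which builds a boolean indexer instead of calling Index.drop:

import pandas as pd

s = pd.Series([1, 2, 3], index=['a', 'a', 'b'])   # non-unique axis
s.drop('a')                    # keeps only the 'b' row
s.drop('z', errors='ignore')   # missing label tolerated
# s.drop('z') would raise KeyError: "['z'] not found in axis"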
<SYSTEM_TASK:>
Replace self internals with result.
<END_TASK>
<USER_TASK:>
Description:
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
""" |
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy) |
<SYSTEM_TASK:>
Prefix labels with string `prefix`.
<END_TASK>
<USER_TASK:>
Description:
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
""" |
f = functools.partial('{prefix}{}'.format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) |
<SYSTEM_TASK:>
Suffix labels with string `suffix`.
<END_TASK>
<USER_TASK:>
Description:
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
""" |
f = functools.partial('{}{suffix}'.format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper) |
<SYSTEM_TASK:>
Sort by the values along either axis.
<END_TASK>
<USER_TASK:>
Description:
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
""" |
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.") |
<SYSTEM_TASK:>
Perform the reindex for all the axes.
<END_TASK>
<USER_TASK:>
Description:
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
"""Perform the reindex for all the axes.""" |
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(labels, level=level, limit=limit,
tolerance=tolerance, method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy, allow_dups=False)
return obj |
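Seen from the public API, a single reindex call that conforms both axes makes one pass through the loop above per axis; a minimal sketch:

import pandas as pd

df = pd.DataFrame({'a': [1.0, 2.0]}, index=['x', 'y'])
df.reindex(index=['x', 'y', 'z'], columns=['a', 'b'], fill_value=0.0)
#      a    b
# x  1.0  0.0
# y  2.0  0.0
# z  0.0  0.0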
<SYSTEM_TASK:>
Check if we do need a multi reindex.
<END_TASK>
<USER_TASK:>
Description:
def _needs_reindex_multi(self, axes, method, level):
"""Check if we do need a multi reindex.""" |
return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and
method is None and level is None and not self._is_mixed_type) |
<SYSTEM_TASK:>
allow_dups indicates an internal call here
<END_TASK>
<USER_TASK:>
Description:
def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False,
allow_dups=False):
"""allow_dups indicates an internal call here """ |
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(index, indexer, axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self) |
<SYSTEM_TASK:>
Subset rows or columns of dataframe according to labels in
<END_TASK>
<USER_TASK:>
Description:
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
""" |
import re
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError('Keyword arguments `items`, `like`, or `regex` '
'are mutually exclusive')
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(
**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in to_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(to_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError('Must pass either `items`, `like`, or `regex`') |
<SYSTEM_TASK:>
Return a random sample of items from an axis of object.
<END_TASK>
<USER_TASK:>
Description:
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Sample with or without replacement.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
""" |
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, pd.Series):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, pd.DataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a "
"valid column")
else:
raise ValueError("Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame")
else:
raise ValueError("Strings cannot be passed as weights "
"when sampling from a Series or Panel.")
weights = pd.Series(weights, dtype='float64')
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of "
"same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative "
"values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if weights don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError('Please enter a value for `frac` OR `n`, not '
'both')
# Check for negative sizes
if n < 0:
raise ValueError("A negative number of rows requested. Please "
"provide positive value.")
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False) |
<SYSTEM_TASK:>
add the string-like attributes from the info_axis.
<END_TASK>
<USER_TASK:>
Description:
def _dir_additions(self):
""" add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first-level values are used.
""" |
additions = {c for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()}
return super()._dir_additions().union(additions) |
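The visible effect is that identifier-like labels participate in dir() and hence tab completion; a short sketch:

import pandas as pd

df = pd.DataFrame({'valid_name': [1], 'not valid': [2]})
'valid_name' in dir(df)   # True: passes str.isidentifier()
'not valid' in dir(df)    # False: contains a space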
<SYSTEM_TASK:>
check whether we allow in-place setting with this type of value
<END_TASK>
<USER_TASK:>
Description:
def _check_inplace_setting(self, value):
""" check whether we allow in-place setting with this type of value """ |
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
try:
if np.isnan(value):
return True
except Exception:
pass
raise TypeError('Cannot do inplace boolean setting on '
'mixed-types with a non np.nan value')
return True |
<SYSTEM_TASK:>
Convert the frame to its Numpy-array representation.
<END_TASK>
<USER_TASK:>
Description:
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array representation.
.. deprecated:: 0.23.0
Use :meth:`DataFrame.values` instead.
Parameters
----------
columns : list, optional, default:None
If None, return all columns, otherwise, returns specified columns.
Returns
-------
values : ndarray
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
See Also
--------
DataFrame.values
Notes
-----
Return is NOT a Numpy-matrix, rather, a Numpy-array.
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
will result in a float64 dtype.
This method is provided for backwards compatibility. Generally,
it is recommended to use '.values'.
""" |
warnings.warn("Method .as_matrix will be removed in a future version. "
"Use .values instead.", FutureWarning, stacklevel=2)
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED,
items=columns) |
<SYSTEM_TASK:>
Return a Numpy representation of the DataFrame.
<END_TASK>
<USER_TASK:>
Description:
def values(self):
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
A DataFrame with mixed type columns(e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
""" |
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED) |
<SYSTEM_TASK:>
Return counts of unique ftypes in this object.
<END_TASK>
<USER_TASK:>
Description:
def get_ftype_counts(self):
"""
Return counts of unique ftypes in this object.
.. deprecated:: 0.23.0
This is useful for SparseDataFrame or for DataFrames containing
sparse arrays.
Returns
-------
dtype : Series
Series with the count of columns with each type and
sparsity (dense/sparse).
See Also
--------
ftypes : Return ftypes (indication of sparse/dense and dtype) in
this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_ftype_counts() # doctest: +SKIP
float64:dense 1
int64:dense 1
object:dense 1
dtype: int64
""" |
warnings.warn("get_ftype_counts is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
from pandas import Series
return Series(self._data.get_ftype_counts()) |
<SYSTEM_TASK:>
Return the dtypes in the DataFrame.
<END_TASK>
<USER_TASK:>
Description:
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
DataFrame.ftypes : Dtype and sparsity information.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
""" |
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis,
dtype=np.object_) |
<SYSTEM_TASK:>
Convert the frame to a dict of dtype -> Constructor Types that each has
<END_TASK>
<USER_TASK:>
Description:
def as_blocks(self, copy=True):
"""
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
.. deprecated:: 0.21.0
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> Constructor Types
""" |
warnings.warn("as_blocks is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
return self._to_dict_of_blocks(copy=copy) |
<SYSTEM_TASK:>
Return a dict of dtype -> Constructor Types that
<END_TASK>
<USER_TASK:>
Description:
def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
""" |
return {k: self._constructor(v).__finalize__(self)
for k, v in self._data.to_dict(copy=copy).items()}
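A peek at the resulting layout, calling the private method directly for illustration (block ordering and exact dtype keys may vary):

import pandas as pd

df = pd.DataFrame({'i': [1, 2], 'f': [1.5, 2.5], 'o': ['a', 'b']})
for dtype, block_frame in df._to_dict_of_blocks().items():
    print(dtype, list(block_frame.columns))
# e.g. prints: float64 ['f'] / int64 ['i'] / object ['o']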
<SYSTEM_TASK:>
Cast a pandas object to a specified dtype ``dtype``.
<END_TASK>
<USER_TASK:>
Description:
def astype(self, dtype, copy=True, errors='raise', **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
""" |
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError('Only the Series name can be used for '
'the key in Series dtype mappings.')
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
elif self.ndim > 2:
raise NotImplementedError(
'astype() only accepts a dtype arg of type dict when '
'invoked on Series and DataFrames. A single dtype must be '
'specified when invoked on a Panel.'
)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
results = []
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype=dtype[col_name], copy=copy,
errors=errors, **kwargs))
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = (self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns)))
else:
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
**kwargs)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result |
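The docstring examples show only the single-dtype path; a short sketch of the dict branch handled above, where unmentioned columns pass through:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['1.5', '2.5']})
out = df.astype({'b': 'float64'})   # cast 'b' only; 'a' is copied
out.dtypes
# a      int64
# b    float64
# dtype: object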
<SYSTEM_TASK:>
Make a copy of this object's indices and data.
<END_TASK>
<USER_TASK:>
Description:
def copy(self, deep=True):
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series, DataFrame or Panel
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original are reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
""" |
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self) |