Code | Summary |
---|---|
Please provide a description of the function:def maybe_infer_dtype_type(element):
tipo = None
if hasattr(element, 'dtype'):
tipo = element.dtype
elif is_list_like(element):
element = np.asarray(element)
tipo = element.dtype
return tipo | [
"Try to infer an object's dtype, for use in arithmetic ops\n\n Uses `element.dtype` if that's available.\n Objects implementing the iterator protocol are cast to a NumPy array,\n and from there the array's type is used.\n\n Parameters\n ----------\n element : object\n Possibly has a `.dtype` attribute, and possibly the iterator\n protocol.\n\n Returns\n -------\n tipo : type\n\n Examples\n --------\n >>> from collections import namedtuple\n >>> Foo = namedtuple(\"Foo\", \"dtype\")\n >>> maybe_infer_dtype_type(Foo(np.dtype(\"i8\")))\n numpy.int64\n "
] |
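A small self-contained sketch of the same inference using only NumPy; the real helper lives in pandas.core.dtypes.cast and is internal, so this only mirrors its fallback to np.asarray for list-likes (the iter() check is a rough stand-in for is_list_like).

import numpy as np

def infer_dtype_type_sketch(element):
    # prefer an existing .dtype; otherwise coerce list-likes through np.asarray
    if hasattr(element, "dtype"):
        return element.dtype
    try:
        iter(element)  # rough stand-in for is_list_like
    except TypeError:
        return None    # plain scalar without a dtype
    return np.asarray(element).dtype

infer_dtype_type_sketch([1, 2, 3])   # int64 on most platforms
infer_dtype_type_sketch(3.5)         # None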
Please provide a description of the function:def maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
if is_extension_type(values):
if copy:
values = values.copy()
else:
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value | [
" provide explicit type promotion and coercion\n\n Parameters\n ----------\n values : the ndarray that we want to maybe upcast\n fill_value : what we want to fill with\n dtype : if None, then use the dtype of the values, else coerce to this type\n copy : if True always make a copy even if no upcast is required\n "
] |
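A hedged usage sketch: maybe_upcast is internal (importable from pandas.core.dtypes.cast in the version this code comes from, and subject to change), and the point is simply that an int64 array paired with a NaN fill value gets promoted to float64.

import numpy as np
from pandas.core.dtypes.cast import maybe_upcast  # internal API; location may change between versions

values = np.array([1, 2, 3], dtype="int64")
upcast_values, fill = maybe_upcast(values, fill_value=np.nan)
# int64 cannot hold NaN, so the values are promoted and NaN is kept as the fill value
print(upcast_values.dtype, fill)  # float64 nan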
Please provide a description of the function:def invalidate_string_dtypes(dtype_set):
non_string_dtypes = dtype_set - {np.dtype('S').type, np.dtype('<U').type}
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead") | [
"Change string like dtypes to object for\n ``DataFrame.select_dtypes()``.\n "
] |
Please provide a description of the function:def coerce_indexer_dtype(indexer, categories):
length = len(categories)
if length < _int8_max:
return ensure_int8(indexer)
elif length < _int16_max:
return ensure_int16(indexer)
elif length < _int32_max:
return ensure_int32(indexer)
return ensure_int64(indexer) | [
" coerce the indexer input array to the smallest dtype possible "
] |
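The effect is visible through the public Categorical API: with only a handful of categories, the codes (the "indexer") come back as the smallest integer dtype that can hold them.

import pandas as pd

cat = pd.Categorical(["a", "b", "a", "c"])
print(cat.codes.dtype)  # int8: three categories fit comfortably below the int8 maximum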
Please provide a description of the function:def coerce_to_dtypes(result, dtypes):
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
def conv(r, dtype):
try:
if isna(r):
pass
elif dtype == _NS_DTYPE:
r = tslibs.Timestamp(r)
elif dtype == _TD_DTYPE:
r = tslibs.Timedelta(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0, 1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except Exception:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)] | [
"\n given a dtypes and a result set, coerce the result elements to the\n dtypes\n "
] |
Please provide a description of the function:def astype_nansafe(arr, dtype, copy=True, skipna=False):
# dispatch on extension dtype if needed
if is_extension_array_dtype(dtype):
return dtype.construct_array_type()._from_sequence(
arr, dtype=dtype, copy=copy)
if not isinstance(dtype, np.dtype):
dtype = pandas_dtype(dtype)
if issubclass(dtype.type, str):
return lib.astype_str(arr.ravel(),
skipna=skipna).reshape(arr.shape)
elif is_datetime64_dtype(arr):
if is_object_dtype(dtype):
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == 'M':
return arr.astype(dtype)
raise TypeError("cannot astype a datetimelike from [{from_dtype}] "
"to [{to_dtype}]".format(from_dtype=arr.dtype,
to_dtype=dtype))
elif is_timedelta64_dtype(arr):
if is_object_dtype(dtype):
return tslibs.ints_to_pytimedelta(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
if dtype not in [_INT64_DTYPE, _TD_DTYPE]:
# allow frequency conversions
# we return a float here!
if dtype.kind == 'm':
mask = isna(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
elif dtype == _TD_DTYPE:
return arr.astype(_TD_DTYPE, copy=copy)
raise TypeError("cannot astype a timedelta from [{from_dtype}] "
"to [{to_dtype}]".format(from_dtype=arr.dtype,
to_dtype=dtype))
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if not np.isfinite(arr).all():
raise ValueError('Cannot convert non-finite values (NA or inf) to '
'integer')
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr).values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = ("The '{dtype}' dtype has no unit. "
"Please pass in '{dtype}[ns]' instead.")
raise ValueError(msg.format(dtype=dtype.name))
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype) | [
"\n Cast the elements of an array to a given dtype a nan-safe manner.\n\n Parameters\n ----------\n arr : ndarray\n dtype : np.dtype\n copy : bool, default True\n If False, a view will be attempted but may fail, if\n e.g. the item sizes don't align.\n skipna: bool, default False\n Whether or not we should skip NaN when casting as a string-type.\n\n Raises\n ------\n ValueError\n The dtype was a datetime64/timedelta64 dtype, but it had no unit.\n "
] |
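The non-finite check is the part users hit most often; going through the public Series.astype exercises it (a sketch, assuming the pandas version shown here).

import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, np.nan])
try:
    s.astype("int64")  # routed through astype_nansafe internally
except ValueError as err:
    print(err)  # Cannot convert non-finite values (NA or inf) to integer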
Please provide a description of the function:def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
convert_timedeltas=True, copy=True):
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = maybe_cast_to_datetime(
values, 'M8[ns]', errors='coerce')
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(values,
convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.core.tools.timedeltas import to_timedelta
new_values = to_timedelta(values, errors='coerce')
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# if we are all nans then leave me alone
if not isna(new_values).all():
values = new_values
except Exception:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
values = values.copy() if copy else values
return values | [
" if we have an object dtype, try to coerce dates and/or numbers "
] |
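The public counterpart of the soft-conversion path is DataFrame.infer_objects, which relies on the same machinery.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]}, dtype="object")
print(df.dtypes)                  # a    object
print(df.infer_objects().dtypes)  # a    int64 -- object values soft-converted to a numeric dtype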
Please provide a description of the function:def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True,
coerce=False, copy=True):
conversion_count = sum((datetime, numeric, timedelta))
if conversion_count == 0:
raise ValueError('At least one of datetime, numeric or timedelta must '
'be True.')
elif conversion_count > 1 and coerce:
raise ValueError("Only one of 'datetime', 'numeric' or "
"'timedelta' can be True when when coerce=True.")
if isinstance(values, (list, tuple)):
# List or scalar
values = np.array(values, dtype=np.object_)
elif not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
elif not is_object_dtype(values.dtype):
# If not object, do not attempt conversion
values = values.copy() if copy else values
return values
# If 1 flag is coerce, ensure 2 others are False
if coerce:
# Immediate return if coerce
if datetime:
from pandas import to_datetime
return to_datetime(values, errors='coerce').to_numpy()
elif timedelta:
from pandas import to_timedelta
return to_timedelta(values, errors='coerce').to_numpy()
elif numeric:
from pandas import to_numeric
return to_numeric(values, errors='coerce')
# Soft conversions
if datetime:
# GH 20380: a datetime beyond year 2262 is outside the bounds of
# nanosecond-resolution 64-bit integers.
try:
values = lib.maybe_convert_objects(values,
convert_datetime=datetime)
except OutOfBoundsDatetime:
pass
if timedelta and is_object_dtype(values.dtype):
# Object check to ensure only run if previous did not convert
values = lib.maybe_convert_objects(values, convert_timedelta=timedelta)
if numeric and is_object_dtype(values.dtype):
try:
converted = lib.maybe_convert_numeric(values, set(),
coerce_numeric=True)
# If all NaNs, then do not alter
values = converted if not isna(converted).all() else values
values = values.copy() if copy else values
except Exception:
pass
return values | [
" if we have an object dtype, try to coerce dates and/or numbers "
] |
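The coerce=True branches map onto the public converters; for example the numeric path behaves like to_numeric with errors='coerce'.

import pandas as pd

vals = pd.Series(["1.5", "not a number", "3"], dtype="object")
print(pd.to_numeric(vals, errors="coerce"))  # unparseable entries become NaN instead of raising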
Please provide a description of the function:def maybe_infer_to_datetimelike(value, convert_dates=False):
# TODO: why not timedelta?
if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex,
ABCDatetimeArray, ABCPeriodArray)):
return value
elif isinstance(value, ABCSeries):
if isinstance(value._values, ABCDatetimeIndex):
return value._values
v = value
if not is_list_like(v):
v = [v]
v = np.array(v, copy=False)
# we only care about object dtypes
if not is_object_dtype(v):
return value
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if not len(v):
return value
def try_datetime(v):
# safe coerce to datetime64
try:
# GH19671
v = tslib.array_to_datetime(v,
require_iso8601=True,
errors='raise')[0]
except ValueError:
# we might have a sequence of the same-datetimes with tz's
# if so coerce to a DatetimeIndex; if they are not the same,
# then these stay as object dtype, xref GH19671
try:
from pandas._libs.tslibs import conversion
from pandas import DatetimeIndex
values, tz = conversion.datetime_to_datetime64(v)
return DatetimeIndex(values).tz_localize(
'UTC').tz_convert(tz=tz)
except (ValueError, TypeError):
pass
except Exception:
pass
return v.reshape(shape)
def try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas import to_timedelta
try:
return to_timedelta(v)._ndarray_values.reshape(shape)
except Exception:
return v.reshape(shape)
inferred_type = lib.infer_datetimelike_array(ensure_object(v))
if inferred_type == 'date' and convert_dates:
value = try_datetime(v)
elif inferred_type == 'datetime':
value = try_datetime(v)
elif inferred_type == 'timedelta':
value = try_timedelta(v)
elif inferred_type == 'nat':
# if all NaT, return as datetime
if isna(v).all():
value = try_datetime(v)
else:
# We have at least a NaT and a string
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
value = try_timedelta(v)
if lib.infer_dtype(value, skipna=False) in ['mixed']:
# cannot skip missing values, as NaT implies that the string
# is actually a datetime
value = try_datetime(v)
return value | [
"\n we might have a array (or single object) that is datetime like,\n and no dtype is passed don't change the value unless we find a\n datetime/timedelta set\n\n this is pretty strict in that a datetime/timedelta is REQUIRED\n in addition to possible nulls/string likes\n\n Parameters\n ----------\n value : np.array / Series / Index / list-like\n convert_dates : boolean, default False\n if True try really hard to convert dates (such as datetime.date), other\n leave inferred dtype 'date' alone\n\n "
] |
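A quick illustration of the strictness via Series construction, which consults this helper for object data in the version shown here.

import pandas as pd

s = pd.Series([pd.Timestamp("2019-01-01"), pd.Timestamp("2019-01-02")])
print(s.dtype)   # datetime64[ns]: an actual datetime set was found

s2 = pd.Series(["2019-01-01", "not a date"])
print(s2.dtype)  # object: plain strings alone do not trigger the conversion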
Please provide a description of the function:def maybe_cast_to_datetime(value, dtype, errors='raise'):
from pandas.core.tools.timedeltas import to_timedelta
from pandas.core.tools.datetimes import to_datetime
if dtype is not None:
if isinstance(dtype, str):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# Force the dtype if needed.
msg = ("The '{dtype}' dtype has no unit. "
"Please pass in '{dtype}[ns]' instead.")
if is_datetime64 and not is_dtype_equal(dtype, _NS_DTYPE):
if dtype.name in ('datetime64', 'datetime64[ns]'):
if dtype.name == 'datetime64':
raise ValueError(msg.format(dtype=dtype.name))
dtype = _NS_DTYPE
else:
raise TypeError("cannot convert datetimelike to "
"dtype [{dtype}]".format(dtype=dtype))
elif is_datetime64tz:
# our NaT doesn't support tz's
# this will coerce to DatetimeIndex with
# a matching dtype below
if is_scalar(value) and isna(value):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, _TD_DTYPE):
if dtype.name in ('timedelta64', 'timedelta64[ns]'):
if dtype.name == 'timedelta64':
raise ValueError(msg.format(dtype=dtype.name))
dtype = _TD_DTYPE
else:
raise TypeError("cannot convert timedeltalike to "
"dtype [{dtype}]".format(dtype=dtype))
if is_scalar(value):
if value == iNaT or isna(value):
value = iNaT
else:
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype,
dtype):
try:
if is_datetime64:
value = to_datetime(value, errors=errors)
# GH 25843: Remove tz information since the dtype
# didn't specify one
if value.tz is not None:
value = value.tz_localize(None)
value = value._values
elif is_datetime64tz:
# The string check can be removed once issue #13712
# is solved. String data that is passed with a
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value)
value = to_datetime(value, errors=errors).array
if is_dt_string:
# Strings here are naive, so directly localize
value = value.tz_localize(dtype.tz)
else:
# Numeric values are UTC at this point,
# so localize and convert
value = (value.tz_localize('UTC')
.tz_convert(dtype.tz))
elif is_timedelta64:
value = to_timedelta(value, errors=errors)._values
except (AttributeError, ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(value) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
ints = np.asarray(value).view('i8')
return tslib.ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError('Cannot cast datetime64 to {dtype}'
.format(dtype=dtype))
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ['M', 'm']:
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
value = to_timedelta(value)
# only infer a datetimelike here if we don't have an array, or the
# array's dtype is integer/object; if the dtype is already set up and is
# neither, don't bother with this conversion
elif not (is_array and not (issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
value = maybe_infer_to_datetimelike(value)
return value | [
" try to cast the array/value to a datetimelike dtype, converting float\n nan to iNaT\n "
] |
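The float-NaN-to-iNaT coercion is what makes the following construction work; a sketch using the public constructor.

import numpy as np
import pandas as pd

s = pd.Series([np.nan, "2019-01-01"], dtype="datetime64[ns]")
print(s)  # NaT followed by 2019-01-01: the float NaN was coerced to NaT (iNaT under the hood)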
Please provide a description of the function:def find_common_type(types):
if len(types) == 0:
raise ValueError('no types given')
first = types[0]
# workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
# => object
if all(is_dtype_equal(first, t) for t in types[1:]):
return first
if any(isinstance(t, (PandasExtensionDtype, ExtensionDtype))
for t in types):
return np.object
# take lowest unit
if all(is_datetime64_dtype(t) for t in types):
return np.dtype('datetime64[ns]')
if all(is_timedelta64_dtype(t) for t in types):
return np.dtype('timedelta64[ns]')
# don't mix bool / int or float or complex
# this is different from numpy, which casts bool with float/int as int
has_bools = any(is_bool_dtype(t) for t in types)
if has_bools:
for t in types:
if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):
return np.object
return np.find_common_type(types, []) | [
"\n Find a common data type among the given dtypes.\n\n Parameters\n ----------\n types : list of dtypes\n\n Returns\n -------\n pandas extension or numpy dtype\n\n See Also\n --------\n numpy.find_common_type\n\n "
] |
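A hedged example; the function is internal but importable in this version, and the bool/int case is the notable difference from NumPy's own promotion rules.

import numpy as np
from pandas.core.dtypes.cast import find_common_type  # internal; path may differ across versions

print(find_common_type([np.dtype("int64"), np.dtype("float32")]))  # float64
print(find_common_type([np.dtype("bool"), np.dtype("int64")]))     # object: bools are not mixed with ints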
Please provide a description of the function:def cast_scalar_to_array(shape, value, dtype=None):
if dtype is None:
dtype, fill_value = infer_dtype_from_scalar(value)
else:
fill_value = value
values = np.empty(shape, dtype=dtype)
values.fill(fill_value)
return values | [
"\n create np.ndarray of specified shape and dtype, filled with values\n\n Parameters\n ----------\n shape : tuple\n value : scalar value\n dtype : np.dtype, optional\n dtype to coerce\n\n Returns\n -------\n ndarray of shape, filled with value, of specified / inferred dtype\n\n "
] |
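When the dtype is supplied up front this is essentially np.full; the inference step only matters when dtype is None.

import numpy as np

arr = np.full((2, 3), 5, dtype="float64")  # same shape/fill behaviour with an explicit dtype
print(arr.dtype, arr[0, 0])                # float64 5.0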
Please provide a description of the function:def construct_1d_arraylike_from_scalar(value, length, dtype):
if is_datetime64tz_dtype(dtype):
from pandas import DatetimeIndex
subarr = DatetimeIndex([value] * length, dtype=dtype)
elif is_categorical_dtype(dtype):
from pandas import Categorical
subarr = Categorical([value] * length, dtype=dtype)
else:
if not isinstance(dtype, (np.dtype, type(np.dtype))):
dtype = dtype.dtype
if length and is_integer_dtype(dtype) and isna(value):
# coerce if we have nan for an integer dtype
dtype = np.dtype('float64')
elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"):
# coerce to object dtype to allow numpy to take our string as a
# scalar value (a fixed-width string dtype could truncate it)
dtype = object
if not isna(value):
value = to_str(value)
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
return subarr | [
"\n create a np.ndarray / pandas type of specified shape and dtype\n filled with values\n\n Parameters\n ----------\n value : scalar value\n length : int\n dtype : pandas_dtype / np.dtype\n\n Returns\n -------\n np.ndarray / pandas type of length, filled with value\n\n "
] |
Please provide a description of the function:def construct_1d_object_array_from_listlike(values):
# numpy will try to interpret nested lists as further dimensions, hence
# making a 1D array that contains list-likes is a bit tricky:
result = np.empty(len(values), dtype='object')
result[:] = values
return result | [
"\n Transform any list-like object in a 1-dimensional numpy array of object\n dtype.\n\n Parameters\n ----------\n values : any iterable which has a len()\n\n Raises\n ------\n TypeError\n * If `values` does not have a len()\n\n Returns\n -------\n 1-dimensional numpy array of dtype object\n "
] |
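The subtlety is easiest to see with nested lists: np.array treats the nesting as a second dimension, whereas the empty-then-assign trick (used during Series construction) keeps one element per sub-list.

import numpy as np
import pandas as pd

data = [[1, 2], [3, 4]]
print(np.array(data, dtype=object).shape)           # (2, 2): the nesting became a second dimension
print(len(pd.Series(data)), pd.Series(data).dtype)  # 2 object: one list per element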
Please provide a description of the function:def construct_1d_ndarray_preserving_na(values, dtype=None, copy=False):
subarr = np.array(values, dtype=dtype, copy=copy)
if dtype is not None and dtype.kind in ("U", "S"):
# GH-21083
# We can't just return np.array(subarr, dtype='str') since
# NumPy will convert the non-string objects into strings
# Including NA values. So we have to go
# string -> object -> update NA, which requires an
# additional pass over the data.
na_values = isna(values)
subarr2 = subarr.astype(object)
subarr2[na_values] = np.asarray(values, dtype=object)[na_values]
subarr = subarr2
return subarr | [
"\n Construct a new ndarray, coercing `values` to `dtype`, preserving NA.\n\n Parameters\n ----------\n values : Sequence\n dtype : numpy.dtype, optional\n copy : bool, default False\n Note that copies may still be made with ``copy=False`` if casting\n is required.\n\n Returns\n -------\n arr : ndarray[dtype]\n\n Examples\n --------\n >>> np.array([1.0, 2.0, None], dtype='str')\n array(['1.0', '2.0', 'None'], dtype='<U4')\n\n >>> construct_1d_ndarray_preserving_na([1.0, 2.0, None], dtype='str')\n\n\n "
] |
Please provide a description of the function:def maybe_cast_to_integer_array(arr, dtype, copy=False):
try:
if not hasattr(arr, "astype"):
casted = np.array(arr, dtype=dtype, copy=copy)
else:
casted = arr.astype(dtype, copy=copy)
except OverflowError:
raise OverflowError("The elements provided in the data cannot all be "
"casted to the dtype {dtype}".format(dtype=dtype))
if np.array_equal(arr, casted):
return casted
# We do this casting to allow for proper
# data and dtype checking.
#
# We didn't do this earlier because NumPy
# doesn't handle `uint64` correctly.
arr = np.asarray(arr)
if is_unsigned_integer_dtype(dtype) and (arr < 0).any():
raise OverflowError("Trying to coerce negative values "
"to unsigned integers")
if is_integer_dtype(dtype) and (is_float_dtype(arr) or
is_object_dtype(arr)):
raise ValueError("Trying to coerce float values to integers") | [
"\n Takes any dtype and returns the casted version, raising for when data is\n incompatible with integer/unsigned integer dtypes.\n\n .. versionadded:: 0.24.0\n\n Parameters\n ----------\n arr : array-like\n The array to cast.\n dtype : str, np.dtype\n The integer dtype to cast the array to.\n copy: boolean, default False\n Whether to make a copy of the array before returning.\n\n Returns\n -------\n int_arr : ndarray\n An array of integer or unsigned integer dtype\n\n Raises\n ------\n OverflowError : the dtype is incompatible with the data\n ValueError : loss of precision has occurred during casting\n\n Examples\n --------\n If you try to coerce negative values to unsigned integers, it raises:\n\n >>> Series([-1], dtype=\"uint64\")\n Traceback (most recent call last):\n ...\n OverflowError: Trying to coerce negative values to unsigned integers\n\n Also, if you try to coerce float values to integers, it raises:\n\n >>> Series([1, 2, 3.5], dtype=\"int64\")\n Traceback (most recent call last):\n ...\n ValueError: Trying to coerce float values to integers\n "
] |
Please provide a description of the function:def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
import matplotlib.pyplot as plt
kwargs.setdefault('edgecolors', 'none')
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
ax.grid(grid)
return fig | [
"\n Make a scatter plot from two DataFrame columns\n\n Parameters\n ----------\n data : DataFrame\n x : Column name for the x-axis values\n y : Column name for the y-axis values\n ax : Matplotlib axis object\n figsize : A tuple (width, height) in inches\n grid : Setting this to True will show the grid\n kwargs : other plotting keyword arguments\n To be passed to scatter function\n\n Returns\n -------\n matplotlib.Figure\n "
] |
Please provide a description of the function:def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
_raise_if_no_mpl()
_converter._WARN = False
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,
figsize=figsize, sharex=sharex, sharey=sharey,
layout=layout, bins=bins, xlabelsize=xlabelsize,
xrot=xrot, ylabelsize=ylabelsize,
yrot=yrot, **kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, ABCIndexClass)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(com.try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes | [
"\n Make a histogram of the DataFrame's.\n\n A `histogram`_ is a representation of the distribution of data.\n This function calls :meth:`matplotlib.pyplot.hist`, on each series in\n the DataFrame, resulting in one histogram per column.\n\n .. _histogram: https://en.wikipedia.org/wiki/Histogram\n\n Parameters\n ----------\n data : DataFrame\n The pandas object holding the data.\n column : string or sequence\n If passed, will be used to limit data to a subset of columns.\n by : object, optional\n If passed, then used to form histograms for separate groups.\n grid : bool, default True\n Whether to show axis grid lines.\n xlabelsize : int, default None\n If specified changes the x-axis label size.\n xrot : float, default None\n Rotation of x axis labels. For example, a value of 90 displays the\n x labels rotated 90 degrees clockwise.\n ylabelsize : int, default None\n If specified changes the y-axis label size.\n yrot : float, default None\n Rotation of y axis labels. For example, a value of 90 displays the\n y labels rotated 90 degrees clockwise.\n ax : Matplotlib axes object, default None\n The axes to plot the histogram on.\n sharex : bool, default True if ax is None else False\n In case subplots=True, share x axis and set some x axis labels to\n invisible; defaults to True if ax is None otherwise False if an ax\n is passed in.\n Note that passing in both an ax and sharex=True will alter all x axis\n labels for all subplots in a figure.\n sharey : bool, default False\n In case subplots=True, share y axis and set some y axis labels to\n invisible.\n figsize : tuple\n The size in inches of the figure to create. Uses the value in\n `matplotlib.rcParams` by default.\n layout : tuple, optional\n Tuple of (rows, columns) for the layout of the histograms.\n bins : integer or sequence, default 10\n Number of histogram bins to be used. If an integer is given, bins + 1\n bin edges are calculated and returned. If bins is a sequence, gives\n bin edges, including left edge of first bin and right edge of last\n bin. In this case, bins is returned unmodified.\n **kwds\n All other plotting keyword arguments to be passed to\n :meth:`matplotlib.pyplot.hist`.\n\n Returns\n -------\n matplotlib.AxesSubplot or numpy.ndarray of them\n\n See Also\n --------\n matplotlib.pyplot.hist : Plot a histogram using matplotlib.\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n This example draws a histogram based on the length and width of\n some animals, displayed in three bins\n\n >>> df = pd.DataFrame({\n ... 'length': [1.5, 0.5, 1.2, 0.9, 3],\n ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]\n ... }, index= ['pig', 'rabbit', 'duck', 'chicken', 'horse'])\n >>> hist = df.hist(bins=3)\n "
] |
Please provide a description of the function:def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None,
bins=10, **kwds):
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
bins=bins, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot, **kwds)
if hasattr(axes, 'ndim'):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes | [
"\n Draw histogram of the input series using matplotlib.\n\n Parameters\n ----------\n by : object, optional\n If passed, then used to form histograms for separate groups\n ax : matplotlib axis object\n If not passed, uses gca()\n grid : bool, default True\n Whether to show axis grid lines\n xlabelsize : int, default None\n If specified changes the x-axis label size\n xrot : float, default None\n rotation of x axis labels\n ylabelsize : int, default None\n If specified changes the y-axis label size\n yrot : float, default None\n rotation of y axis labels\n figsize : tuple, default None\n figure size in inches by default\n bins : integer or sequence, default 10\n Number of histogram bins to be used. If an integer is given, bins + 1\n bin edges are calculated and returned. If bins is a sequence, gives\n bin edges, including left edge of first bin and right edge of last\n bin. In this case, bins is returned unmodified.\n bins : integer, default 10\n Number of histogram bins to be used\n `**kwds` : keywords\n To be passed to the actual plotting function\n\n See Also\n --------\n matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.\n "
] |
Please provide a description of the function:def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
layout=None, sharex=False, sharey=False, rot=90, grid=True,
xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
**kwargs):
_raise_if_no_mpl()
_converter._WARN = False
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
xrot = xrot or rot
fig, axes = _grouped_plot(plot_group, data, column=column,
by=by, sharex=sharex, sharey=sharey, ax=ax,
figsize=figsize, layout=layout, rot=rot)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
hspace=0.5, wspace=0.3)
return axes | [
"\n Grouped histogram\n\n Parameters\n ----------\n data : Series/DataFrame\n column : object, optional\n by : object, optional\n ax : axes, optional\n bins : int, default 50\n figsize : tuple, optional\n layout : optional\n sharex : bool, default False\n sharey : bool, default False\n rot : int, default 90\n grid : bool, default True\n kwargs : dict, keyword arguments passed to matplotlib.Axes.hist\n\n Returns\n -------\n collection of Matplotlib Axes\n "
] |
Please provide a description of the function:def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, sharex=False, sharey=True, **kwds):
_raise_if_no_mpl()
_converter._WARN = False
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=sharex, sharey=sharey,
figsize=figsize, layout=layout)
axes = _flatten(axes)
from pandas.core.series import Series
ret = Series()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(pprint_thing(key))
ret.loc[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,
right=0.9, wspace=0.2)
else:
from pandas.core.reshape.concat import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize,
layout=layout, **kwds)
return ret | [
"\n Make box plots from DataFrameGroupBy data.\n\n Parameters\n ----------\n grouped : Grouped DataFrame\n subplots : bool\n * ``False`` - no subplots will be used\n * ``True`` - create a subplot for each group\n column : column name or list of names, or vector\n Can be any valid input to groupby\n fontsize : int or string\n rot : label rotation angle\n grid : Setting this to True will show the grid\n ax : Matplotlib axis object, default None\n figsize : A tuple (width, height) in inches\n layout : tuple (optional)\n (rows, columns) for the layout of the plot\n sharex : bool, default False\n Whether x-axes will be shared among subplots\n\n .. versionadded:: 0.23.1\n sharey : bool, default True\n Whether y-axes will be shared among subplots\n\n .. versionadded:: 0.23.1\n `**kwds` : Keyword Arguments\n All other plotting keyword arguments to be passed to\n matplotlib's boxplot function\n\n Returns\n -------\n dict of key/value = group key/DataFrame.boxplot return value\n or DataFrame.boxplot return value in case subplots=figures=False\n\n Examples\n --------\n >>> import itertools\n >>> tuples = [t for t in itertools.product(range(1000), range(4))]\n >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])\n >>> data = np.random.randn(len(index),4)\n >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)\n >>>\n >>> grouped = df.groupby(level='lvl1')\n >>> boxplot_frame_groupby(grouped)\n >>>\n >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)\n >>> boxplot_frame_groupby(grouped, subplots=False)\n "
] |
Please provide a description of the function:def _has_plotted_object(self, ax):
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0) | [
"check whether ax has data"
] |
Please provide a description of the function:def result(self):
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0] | [
"\n Return result axes\n "
] |
Please provide a description of the function:def _post_plot_logic_common(self, ax, data):
def get_label(i):
try:
return pprint_thing(data.index[i])
except Exception:
return ''
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [get_label(x) for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [get_label(y) for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
else: # pragma no cover
raise ValueError | [
"Common post process for each axes"
] |
Please provide a description of the function:def _adorn_subplots(self):
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
msg = ('The length of `title` must equal the number '
'of columns if using `title` of type `list` '
'and `subplots=True`.\n'
'length of title = {}\n'
'number of columns = {}').format(
len(self.title), self.nseries)
raise ValueError(msg)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = ('Using `title` of type `list` is not supported '
'unless `subplots=True` is passed')
raise ValueError(msg)
self.axes[0].set_title(self.title) | [
"Common post process unrelated to data"
] |
Please provide a description of the function:def _apply_axis_properties(self, axis, rot=None, fontsize=None):
if rot is not None or fontsize is not None:
# rot=0 is a valid setting, hence the explicit None check
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize) | [
" Tick creation within matplotlib is reasonably expensive and is\n internally deferred until accessed as Ticks are created/destroyed\n multiple times per draw. It's therefore beneficial for us to avoid\n accessing unless we will act on the Tick.\n "
] |
Please provide a description of the function:def _get_ax_layer(cls, ax, primary=True):
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax) | [
"get left (primary) or right (secondary) axes"
] |
Please provide a description of the function:def _apply_style_colors(self, colors, kwds, col_num, label):
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = 'color' in kwds or self.colormap is not None
nocolor_style = style is None or re.match('[a-z]+', style) is None
if (has_color or self.subplots) and nocolor_style:
kwds['color'] = colors[col_num % len(colors)]
return style, kwds | [
"\n Manage style and color based on column number and its label.\n Returns tuple of appropriate style and kwds which \"color\" may be added.\n "
] |
Please provide a description of the function:def _parse_errorbars(self, label, err):
if err is None:
return None
def match_labels(data, e):
e = e.reindex(data.index)
return e
# key-matched DataFrame
if isinstance(err, ABCDataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, ABCSeries):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, str):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif is_list_like(err):
if is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid {label} detected".format(label=label)
raise ValueError(msg)
return err | [
"\n Look for error keyword arguments and return the actual errorbar data\n or return the error DataFrame/dict\n\n Error bars can be specified in several ways:\n Series: the user provides a pandas.Series object of the same\n length as the data\n ndarray: provides a np.ndarray of the same length as the data\n DataFrame/dict: error values are paired with keys matching the\n key in the plotted DataFrame\n str: the name of the column within the plotted DataFrame\n "
] |
Please provide a description of the function:def _make_plot_keywords(self, kwds, y):
# y is required for KdePlot
kwds['bottom'] = self.bottom
kwds['bins'] = self.bins
return kwds | [
"merge BoxPlot/KdePlot properties to passed kwds"
] |
Please provide a description of the function:def line(self, x=None, y=None, **kwds):
return self(kind='line', x=x, y=y, **kwds) | [
"\n Plot DataFrame columns as lines.\n\n This function is useful to plot lines using DataFrame's values\n as coordinates.\n\n Parameters\n ----------\n x : int or str, optional\n Columns to use for the horizontal axis.\n Either the location or the label of the columns to be used.\n By default, it will use the DataFrame indices.\n y : int, str, or list of them, optional\n The values to be plotted.\n Either the location or the label of the columns to be used.\n By default, it will use the remaining DataFrame numeric columns.\n **kwds\n Keyword arguments to pass on to :meth:`DataFrame.plot`.\n\n Returns\n -------\n :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray`\n Return an ndarray when ``subplots=True``.\n\n See Also\n --------\n matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n The following example shows the populations for some animals\n over the years.\n\n >>> df = pd.DataFrame({\n ... 'pig': [20, 18, 489, 675, 1776],\n ... 'horse': [4, 25, 281, 600, 1900]\n ... }, index=[1990, 1997, 2003, 2009, 2014])\n >>> lines = df.plot.line()\n\n .. plot::\n :context: close-figs\n\n An example with subplots, so an array of axes is returned.\n\n >>> axes = df.plot.line(subplots=True)\n >>> type(axes)\n <class 'numpy.ndarray'>\n\n .. plot::\n :context: close-figs\n\n The following example shows the relationship between both\n populations.\n\n >>> lines = df.plot.line(x='pig', y='horse')\n "
] |
Please provide a description of the function:def bar(self, x=None, y=None, **kwds):
return self(kind='bar', x=x, y=y, **kwds) | [
"\n Vertical bar plot.\n\n A bar plot is a plot that presents categorical data with\n rectangular bars with lengths proportional to the values that they\n represent. A bar plot shows comparisons among discrete categories. One\n axis of the plot shows the specific categories being compared, and the\n other axis represents a measured value.\n\n Parameters\n ----------\n x : label or position, optional\n Allows plotting of one column versus another. If not specified,\n the index of the DataFrame is used.\n y : label or position, optional\n Allows plotting of one column versus another. If not specified,\n all numerical columns are used.\n **kwds\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or np.ndarray of them\n An ndarray is returned with one :class:`matplotlib.axes.Axes`\n per column when ``subplots=True``.\n\n See Also\n --------\n DataFrame.plot.barh : Horizontal bar plot.\n DataFrame.plot : Make plots of a DataFrame.\n matplotlib.pyplot.bar : Make a bar plot with matplotlib.\n\n Examples\n --------\n Basic plot.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})\n >>> ax = df.plot.bar(x='lab', y='val', rot=0)\n\n Plot a whole dataframe to a bar plot. Each column is assigned a\n distinct color, and each row is nested in a group along the\n horizontal axis.\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.bar(rot=0)\n\n Instead of nesting, the figure can be split by column with\n ``subplots=True``. In this case, a :class:`numpy.ndarray` of\n :class:`matplotlib.axes.Axes` are returned.\n\n .. plot::\n :context: close-figs\n\n >>> axes = df.plot.bar(rot=0, subplots=True)\n >>> axes[1].legend(loc=2) # doctest: +SKIP\n\n Plot a single column.\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(y='speed', rot=0)\n\n Plot only selected categories for the DataFrame.\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(x='lifespan', rot=0)\n "
] |
Please provide a description of the function:def barh(self, x=None, y=None, **kwds):
return self(kind='barh', x=x, y=y, **kwds) | [
"\n Make a horizontal bar plot.\n\n A horizontal bar plot is a plot that presents quantitative data with\n rectangular bars with lengths proportional to the values that they\n represent. A bar plot shows comparisons among discrete categories. One\n axis of the plot shows the specific categories being compared, and the\n other axis represents a measured value.\n\n Parameters\n ----------\n x : label or position, default DataFrame.index\n Column to be used for categories.\n y : label or position, default All numeric columns in dataframe\n Columns to be plotted from the DataFrame.\n **kwds\n Keyword arguments to pass on to :meth:`DataFrame.plot`.\n\n Returns\n -------\n :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n\n See Also\n --------\n DataFrame.plot.bar: Vertical bar plot.\n DataFrame.plot : Make plots of DataFrame using matplotlib.\n matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.\n\n Examples\n --------\n Basic example\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})\n >>> ax = df.plot.barh(x='lab', y='val')\n\n Plot a whole DataFrame to a horizontal bar plot\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh()\n\n Plot a column of the DataFrame to a horizontal bar plot\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh(y='speed')\n\n Plot DataFrame versus the desired column\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = ['snail', 'pig', 'elephant',\n ... 'rabbit', 'giraffe', 'coyote', 'horse']\n >>> df = pd.DataFrame({'speed': speed,\n ... 'lifespan': lifespan}, index=index)\n >>> ax = df.plot.barh(x='lifespan')\n "
] |
Please provide a description of the function:def hist(self, by=None, bins=10, **kwds):
return self(kind='hist', by=by, bins=bins, **kwds) | [
"\n Draw one histogram of the DataFrame's columns.\n\n A histogram is a representation of the distribution of data.\n This function groups the values of all given Series in the DataFrame\n into bins and draws all bins in one :class:`matplotlib.axes.Axes`.\n This is useful when the DataFrame's Series are in a similar scale.\n\n Parameters\n ----------\n by : str or sequence, optional\n Column in the DataFrame to group by.\n bins : int, default 10\n Number of histogram bins to be used.\n **kwds\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n class:`matplotlib.AxesSubplot`\n Return a histogram plot.\n\n See Also\n --------\n DataFrame.hist : Draw histograms per DataFrame's Series.\n Series.hist : Draw a histogram with Series' data.\n\n Examples\n --------\n When we draw a dice 6000 times, we expect to get each value around 1000\n times. But when we draw two dices and sum the result, the distribution\n is going to be quite different. A histogram illustrates those\n distributions.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(\n ... np.random.randint(1, 7, 6000),\n ... columns = ['one'])\n >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)\n >>> ax = df.plot.hist(bins=12, alpha=0.5)\n "
] |
Please provide a description of the function:def area(self, x=None, y=None, **kwds):
return self(kind='area', x=x, y=y, **kwds) | [
"\n Draw a stacked area plot.\n\n An area plot displays quantitative data visually.\n This function wraps the matplotlib area function.\n\n Parameters\n ----------\n x : label or position, optional\n Coordinates for the X axis. By default uses the index.\n y : label or position, optional\n Column to plot. By default uses all columns.\n stacked : bool, default True\n Area plots are stacked by default. Set to False to create a\n unstacked plot.\n **kwds : optional\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or numpy.ndarray\n Area plot, or array of area plots if subplots is True.\n\n See Also\n --------\n DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.\n\n Examples\n --------\n Draw an area plot based on basic business metrics:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'sales': [3, 2, 3, 9, 10, 6],\n ... 'signups': [5, 5, 6, 12, 14, 13],\n ... 'visits': [20, 42, 28, 62, 81, 50],\n ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',\n ... freq='M'))\n >>> ax = df.plot.area()\n\n Area plots are stacked by default. To produce an unstacked plot,\n pass ``stacked=False``:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.area(stacked=False)\n\n Draw an area plot for a single column:\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.area(y='sales')\n\n Draw with a different `x`:\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\n ... 'sales': [3, 2, 3],\n ... 'visits': [20, 42, 28],\n ... 'day': [1, 2, 3],\n ... })\n >>> ax = df.plot.area(x='day')\n "
] |
Please provide a description of the function:def scatter(self, x, y, s=None, c=None, **kwds):
return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds) | [
"\n Create a scatter plot with varying marker point size and color.\n\n The coordinates of each point are defined by two dataframe columns and\n filled circles are used to represent each point. This kind of plot is\n useful to see complex correlations between two variables. Points could\n be for instance natural 2D coordinates like longitude and latitude in\n a map or, in general, any pair of metrics that can be plotted against\n each other.\n\n Parameters\n ----------\n x : int or str\n The column name or column position to be used as horizontal\n coordinates for each point.\n y : int or str\n The column name or column position to be used as vertical\n coordinates for each point.\n s : scalar or array_like, optional\n The size of each point. Possible values are:\n\n - A single scalar so all points have the same size.\n\n - A sequence of scalars, which will be used for each point's size\n recursively. For instance, when passing [2,14] all points size\n will be either 2 or 14, alternatively.\n\n c : str, int or array_like, optional\n The color of each point. Possible values are:\n\n - A single color string referred to by name, RGB or RGBA code,\n for instance 'red' or '#a98d19'.\n\n - A sequence of color strings referred to by name, RGB or RGBA\n code, which will be used for each point's color recursively. For\n instance ['green','yellow'] all points will be filled in green or\n yellow, alternatively.\n\n - A column name or position whose values will be used to color the\n marker points according to a colormap.\n\n **kwds\n Keyword arguments to pass on to :meth:`DataFrame.plot`.\n\n Returns\n -------\n :class:`matplotlib.axes.Axes` or numpy.ndarray of them\n\n See Also\n --------\n matplotlib.pyplot.scatter : Scatter plot using multiple input data\n formats.\n\n Examples\n --------\n Let's see how to draw a scatter plot using coordinates from the values\n in a DataFrame's columns.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],\n ... [6.4, 3.2, 1], [5.9, 3.0, 2]],\n ... columns=['length', 'width', 'species'])\n >>> ax1 = df.plot.scatter(x='length',\n ... y='width',\n ... c='DarkBlue')\n\n And now with the color determined by a column as well.\n\n .. plot::\n :context: close-figs\n\n >>> ax2 = df.plot.scatter(x='length',\n ... y='width',\n ... c='species',\n ... colormap='viridis')\n "
] |
Please provide a description of the function:def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
**kwds):
if reduce_C_function is not None:
kwds['reduce_C_function'] = reduce_C_function
if gridsize is not None:
kwds['gridsize'] = gridsize
return self(kind='hexbin', x=x, y=y, C=C, **kwds) | [
"\n Generate a hexagonal binning plot.\n\n Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`\n (the default), this is a histogram of the number of occurrences\n of the observations at ``(x[i], y[i])``.\n\n If `C` is specified, specifies values at given coordinates\n ``(x[i], y[i])``. These values are accumulated for each hexagonal\n bin and then reduced according to `reduce_C_function`,\n having as default the NumPy's mean function (:meth:`numpy.mean`).\n (If `C` is specified, it must also be a 1-D sequence\n of the same length as `x` and `y`, or a column label.)\n\n Parameters\n ----------\n x : int or str\n The column label or position for x points.\n y : int or str\n The column label or position for y points.\n C : int or str, optional\n The column label or position for the value of `(x, y)` point.\n reduce_C_function : callable, default `np.mean`\n Function of one argument that reduces all the values in a bin to\n a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).\n gridsize : int or tuple of (int, int), default 100\n The number of hexagons in the x-direction.\n The corresponding number of hexagons in the y-direction is\n chosen in a way that the hexagons are approximately regular.\n Alternatively, gridsize can be a tuple with two elements\n specifying the number of hexagons in the x-direction and the\n y-direction.\n **kwds\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.AxesSubplot\n The matplotlib ``Axes`` on which the hexbin is plotted.\n\n See Also\n --------\n DataFrame.plot : Make plots of a DataFrame.\n matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,\n the matplotlib function that is used under the hood.\n\n Examples\n --------\n The following examples are generated with random data from\n a normal distribution.\n\n .. plot::\n :context: close-figs\n\n >>> n = 10000\n >>> df = pd.DataFrame({'x': np.random.randn(n),\n ... 'y': np.random.randn(n)})\n >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)\n\n The next example uses `C` and `np.sum` as `reduce_C_function`.\n Note that `'observations'` values ranges from 1 to 5 but the result\n plot shows values up to more than 25. This is because of the\n `reduce_C_function`.\n\n .. plot::\n :context: close-figs\n\n >>> n = 500\n >>> df = pd.DataFrame({\n ... 'coord_x': np.random.uniform(-3, 3, size=n),\n ... 'coord_y': np.random.uniform(30, 50, size=n),\n ... 'observations': np.random.randint(1,5, size=n)\n ... })\n >>> ax = df.plot.hexbin(x='coord_x',\n ... y='coord_y',\n ... C='observations',\n ... reduce_C_function=np.sum,\n ... gridsize=10,\n ... cmap=\"viridis\")\n "
] |
Please provide a description of the function:def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True):
obs_idxes = [obj._get_axis(axis) for obj in objs
if hasattr(obj, '_get_axis')]
if obs_idxes:
return _get_combined_index(obs_idxes, intersect=intersect, sort=sort) | [
"\n Extract combined index: return intersection or union (depending on the\n value of \"intersect\") of indexes on given axis, or None if all objects\n lack indexes (e.g. they are numpy arrays).\n\n Parameters\n ----------\n objs : list of objects\n Each object will only be considered if it has a _get_axis\n attribute.\n intersect : bool, default False\n If True, calculate the intersection between indexes. Otherwise,\n calculate the union.\n axis : {0 or 'index', 1 or 'outer'}, default 0\n The axis to extract indexes from.\n sort : bool, default True\n Whether the result index should come out sorted or not.\n\n Returns\n -------\n Index\n "
] |
Please provide a description of the function:def _get_distinct_objs(objs):
ids = set()
res = []
for obj in objs:
if not id(obj) in ids:
ids.add(id(obj))
res.append(obj)
return res | [
"\n Return a list with distinct elements of \"objs\" (different ids).\n Preserves order.\n "
] |
Please provide a description of the function:def _get_combined_index(indexes, intersect=False, sort=False):
# TODO: handle index names!
indexes = _get_distinct_objs(indexes)
if len(indexes) == 0:
index = Index([])
elif len(indexes) == 1:
index = indexes[0]
elif intersect:
index = indexes[0]
for other in indexes[1:]:
index = index.intersection(other)
else:
index = _union_indexes(indexes, sort=sort)
index = ensure_index(index)
if sort:
try:
index = index.sort_values()
except TypeError:
pass
return index | [
"\n Return the union or intersection of indexes.\n\n Parameters\n ----------\n indexes : list of Index or list objects\n When intersect=True, do not accept list of lists.\n intersect : bool, default False\n If True, calculate the intersection between indexes. Otherwise,\n calculate the union.\n sort : bool, default False\n Whether the result index should come out sorted or not.\n\n Returns\n -------\n Index\n "
] |
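The two combining modes above map directly onto the public Index API; a minimal sketch (the index contents are made up for illustration):

import pandas as pd

idx1 = pd.Index(['a', 'b', 'c'])
idx2 = pd.Index(['b', 'c', 'd'])

# intersect=False (the default) behaves like a running union of the axes
print(idx1.union(idx2))         # Index(['a', 'b', 'c', 'd'], dtype='object')
# intersect=True behaves like a running intersection
print(idx1.intersection(idx2))  # Index(['b', 'c'], dtype='object')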
Please provide a description of the function:def _union_indexes(indexes, sort=True):
if len(indexes) == 0:
raise AssertionError('Must have at least 1 Index to union')
if len(indexes) == 1:
result = indexes[0]
if isinstance(result, list):
result = Index(sorted(result))
return result
indexes, kind = _sanitize_and_check(indexes)
def _unique_indices(inds):
def conv(i):
if isinstance(i, Index):
i = i.tolist()
return i
return Index(
lib.fast_unique_multiple_list([conv(i) for i in inds], sort=sort))
if kind == 'special':
result = indexes[0]
if hasattr(result, 'union_many'):
return result.union_many(indexes[1:])
else:
for other in indexes[1:]:
result = result.union(other)
return result
elif kind == 'array':
index = indexes[0]
for other in indexes[1:]:
if not index.equals(other):
if sort is None:
# TODO: remove once pd.concat sort default changes
warnings.warn(_sort_msg, FutureWarning, stacklevel=8)
sort = True
return _unique_indices(indexes)
name = _get_consensus_names(indexes)[0]
if name != index.name:
index = index._shallow_copy(name=name)
return index
else: # kind='list'
return _unique_indices(indexes) | [
"\n Return the union of indexes.\n\n The behavior of sort and names is not consistent.\n\n Parameters\n ----------\n indexes : list of Index or list objects\n sort : bool, default True\n Whether the result index should come out sorted or not.\n\n Returns\n -------\n Index\n ",
"\n Convert indexes to lists and concatenate them, removing duplicates.\n\n The final dtype is inferred.\n\n Parameters\n ----------\n inds : list of Index or list objects\n\n Returns\n -------\n Index\n "
] |
Please provide a description of the function:def _sanitize_and_check(indexes):
kinds = list({type(index) for index in indexes})
if list in kinds:
if len(kinds) > 1:
indexes = [Index(com.try_sort(x))
if not isinstance(x, Index) else
x for x in indexes]
kinds.remove(list)
else:
return indexes, 'list'
if len(kinds) > 1 or Index not in kinds:
return indexes, 'special'
else:
return indexes, 'array' | [
"\n Verify the type of indexes and convert lists to Index.\n\n Cases:\n\n - [list, list, ...]: Return ([list, list, ...], 'list')\n - [list, Index, ...]: Return _sanitize_and_check([Index, Index, ...])\n Lists are sorted and converted to Index.\n - [Index, Index, ...]: Return ([Index, Index, ...], TYPE)\n TYPE = 'special' if at least one special type, 'array' otherwise.\n\n Parameters\n ----------\n indexes : list of Index or list objects\n\n Returns\n -------\n sanitized_indexes : list of Index or list objects\n type : {'list', 'array', 'special'}\n "
] |
Please provide a description of the function:def _get_consensus_names(indexes):
# find the non-none names, need to tupleify to make
# the set hashable, then reverse on return
consensus_names = {tuple(i.names) for i in indexes
if com._any_not_none(*i.names)}
if len(consensus_names) == 1:
return list(list(consensus_names)[0])
return [None] * indexes[0].nlevels | [
"\n Give a consensus 'names' to indexes.\n\n If there's exactly one non-empty 'names', return this,\n otherwise, return empty.\n\n Parameters\n ----------\n indexes : list of Index objects\n\n Returns\n -------\n list\n A list representing the consensus 'names' found.\n "
] |
Please provide a description of the function:def _all_indexes_same(indexes):
first = indexes[0]
for index in indexes[1:]:
if not first.equals(index):
return False
return True | [
"\n Determine if all indexes contain the same elements.\n\n Parameters\n ----------\n indexes : list of Index objects\n\n Returns\n -------\n bool\n True if all indexes contain the same elements, False otherwise.\n "
] |
Please provide a description of the function:def _convert_params(sql, params):
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args | [
"Convert SQL and params args to DBAPI2.0 compliant format."
] |
Please provide a description of the function:def _process_parse_dates_argument(parse_dates):
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
return parse_dates | [
"Process parse_dates argument for read_sql functions"
] |
Please provide a description of the function:def _parse_date_columns(data_frame, parse_dates):
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame | [
"\n Force non-datetime columns to be read as such.\n Supports both string formatted and integer timestamp columns.\n "
] |
Please provide a description of the function:def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame | [
"Wrap result set of query in a DataFrame."
] |
Please provide a description of the function:def execute(sql, con, cur=None, params=None):
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args) | [
"\n Execute the given SQL query using the provided connection object.\n\n Parameters\n ----------\n sql : string\n SQL query to be executed.\n con : SQLAlchemy connectable(engine/connection) or sqlite3 connection\n Using SQLAlchemy makes it possible to use any DB supported by the\n library.\n If a DBAPI2 object, only sqlite3 is supported.\n cur : deprecated, cursor is obtained from connection, default: None\n params : list or tuple, optional, default: None\n List of parameters to pass to execute method.\n\n Returns\n -------\n Results Iterable\n "
] |
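A self-contained sketch of `execute` against a sqlite3 DBAPI connection; the `demo` table and its rows are invented for the example:

import sqlite3
from pandas.io import sql

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE demo (id INTEGER, name TEXT)")
conn.executemany("INSERT INTO demo VALUES (?, ?)", [(1, 'a'), (2, 'b')])

# qmark-style parameters, since sqlite3 is the driver here
cursor = sql.execute("SELECT name FROM demo WHERE id = ?", conn, params=[1])
print(cursor.fetchall())   # [('a',)]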
Please provide a description of the function:def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table {name} not found".format(name=table_name))
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table {name} not found".format(name=table_name), con) | [
"\n Read SQL database table into a DataFrame.\n\n Given a table name and a SQLAlchemy connectable, returns a DataFrame.\n This function does not support DBAPI connections.\n\n Parameters\n ----------\n table_name : str\n Name of SQL table in database.\n con : SQLAlchemy connectable or str\n A database URI could be provided as as str.\n SQLite DBAPI connection mode not supported.\n schema : str, default None\n Name of SQL schema in database to query (if database flavor\n supports this). Uses default schema if None (default).\n index_col : str or list of str, optional, default: None\n Column(s) to set as index(MultiIndex).\n coerce_float : bool, default True\n Attempts to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point. Can result in loss of Precision.\n parse_dates : list or dict, default None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict corresponds\n to the keyword arguments of :func:`pandas.to_datetime`\n Especially useful with databases without native Datetime support,\n such as SQLite.\n columns : list, default None\n List of column names to select from SQL table.\n chunksize : int, default None\n If specified, returns an iterator where `chunksize` is the number of\n rows to include in each chunk.\n\n Returns\n -------\n DataFrame\n A SQL table is returned as two-dimensional data structure with labeled\n axes.\n\n See Also\n --------\n read_sql_query : Read SQL query into a DataFrame.\n read_sql : Read SQL query or database table into a DataFrame.\n\n Notes\n -----\n Any datetime values with time zone information will be converted to UTC.\n\n Examples\n --------\n >>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP\n "
] |
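An illustrative round trip for `read_sql_table`; it assumes SQLAlchemy is installed and uses an in-memory SQLite engine purely for the sketch:

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('sqlite:///:memory:')
pd.DataFrame({'x': [1, 2, 3]}).to_sql('points', engine, index=False)

df = pd.read_sql_table('points', engine)   # reflects the table, then reads it
print(df['x'].tolist())                    # [1, 2, 3]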
Please provide a description of the function:def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize) | [
"Read SQL query into a DataFrame.\n\n Returns a DataFrame corresponding to the result set of the query\n string. Optionally provide an `index_col` parameter to use one of the\n columns as the index, otherwise default integer index will be used.\n\n Parameters\n ----------\n sql : string SQL query or SQLAlchemy Selectable (select or text object)\n SQL query to be executed.\n con : SQLAlchemy connectable(engine/connection), database string URI,\n or sqlite3 DBAPI2 connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library.\n If a DBAPI2 object, only sqlite3 is supported.\n index_col : string or list of strings, optional, default: None\n Column(s) to set as index(MultiIndex).\n coerce_float : boolean, default True\n Attempts to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point. Useful for SQL result sets.\n params : list, tuple or dict, optional, default: None\n List of parameters to pass to execute method. The syntax used\n to pass parameters is database driver dependent. Check your\n database driver documentation for which of the five syntax styles,\n described in PEP 249's paramstyle, is supported.\n Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}\n parse_dates : list or dict, default: None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict corresponds\n to the keyword arguments of :func:`pandas.to_datetime`\n Especially useful with databases without native Datetime support,\n such as SQLite.\n chunksize : int, default None\n If specified, return an iterator where `chunksize` is the number of\n rows to include in each chunk.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql\n\n Notes\n -----\n Any datetime values with time zone information parsed via the `parse_dates`\n parameter will be converted to UTC.\n "
] |
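A minimal sketch of `read_sql_query` with a plain sqlite3 connection and qmark-style parameters (table name and values are made up):

import sqlite3
import pandas as pd

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE t (id INTEGER, val REAL)")
conn.executemany("INSERT INTO t VALUES (?, ?)", [(1, 1.5), (2, 2.5)])

df = pd.read_sql_query("SELECT * FROM t WHERE id = ?", conn, params=(1,))
print(df)   # one row: id=1, val=1.5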
Please provide a description of the function:def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
# using generic exception to catch errors from sql drivers (GH24988)
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize) | [
"\n Read SQL query or database table into a DataFrame.\n\n This function is a convenience wrapper around ``read_sql_table`` and\n ``read_sql_query`` (for backward compatibility). It will delegate\n to the specific function depending on the provided input. A SQL query\n will be routed to ``read_sql_query``, while a database table name will\n be routed to ``read_sql_table``. Note that the delegated function might\n have more specific notes about their functionality not listed here.\n\n Parameters\n ----------\n sql : string or SQLAlchemy Selectable (select or text object)\n SQL query to be executed or a table name.\n con : SQLAlchemy connectable (engine/connection) or database string URI\n or DBAPI2 connection (fallback mode)\n\n Using SQLAlchemy makes it possible to use any DB supported by that\n library. If a DBAPI2 object, only sqlite3 is supported.\n index_col : string or list of strings, optional, default: None\n Column(s) to set as index(MultiIndex).\n coerce_float : boolean, default True\n Attempts to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets.\n params : list, tuple or dict, optional, default: None\n List of parameters to pass to execute method. The syntax used\n to pass parameters is database driver dependent. Check your\n database driver documentation for which of the five syntax styles,\n described in PEP 249's paramstyle, is supported.\n Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}\n parse_dates : list or dict, default: None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict corresponds\n to the keyword arguments of :func:`pandas.to_datetime`\n Especially useful with databases without native Datetime support,\n such as SQLite.\n columns : list, default: None\n List of column names to select from SQL table (only used when reading\n a table).\n chunksize : int, default None\n If specified, return an iterator where `chunksize` is the\n number of rows to include in each chunk.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql_query : Read SQL query into a DataFrame.\n "
] |
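The dispatch can be seen directly: a bare table name is routed to ``read_sql_table``, anything else to ``read_sql_query``. The sketch assumes SQLAlchemy and an in-memory SQLite engine:

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('sqlite:///:memory:')
pd.DataFrame({'a': [1, 2]}).to_sql('tbl', engine, index=False)

via_table = pd.read_sql('tbl', engine)                 # treated as a table name
via_query = pd.read_sql('SELECT a FROM tbl', engine)   # treated as a query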
Please provide a description of the function:def to_sql(frame, name, con, schema=None, if_exists='fail', index=True,
index_label=None, chunksize=None, dtype=None, method=None):
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError("'frame' argument should be either a "
"Series or a DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype, method=method) | [
"\n Write records stored in a DataFrame to a SQL database.\n\n Parameters\n ----------\n frame : DataFrame, Series\n name : string\n Name of SQL table.\n con : SQLAlchemy connectable(engine/connection) or database string URI\n or sqlite3 DBAPI2 connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library.\n If a DBAPI2 object, only sqlite3 is supported.\n schema : string, default None\n Name of SQL schema in database to write to (if database flavor\n supports this). If None, use default schema (default).\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n - fail: If table exists, do nothing.\n - replace: If table exists, drop it, recreate it, and insert data.\n - append: If table exists, insert data. Create if does not exist.\n index : boolean, default True\n Write DataFrame index as a column.\n index_label : string or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n chunksize : int, default None\n If not None, then rows will be written in batches of this size at a\n time. If None, all rows will be written at once.\n dtype : single SQLtype or dict of column name to SQL type, default None\n Optional specifying the datatype for columns. The SQL type should\n be a SQLAlchemy type, or a string for sqlite3 fallback connection.\n If all columns are of the same type, one single value can be used.\n method : {None, 'multi', callable}, default None\n Controls the SQL insertion clause used:\n\n - None : Uses standard SQL ``INSERT`` clause (one per row).\n - 'multi': Pass multiple values in a single ``INSERT`` clause.\n - callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n "
] |
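A hedged sketch of the ``method`` callable: the signature ``(pd_table, conn, keys, data_iter)`` comes from the docstring above, but the body below is only one possible implementation (a plain ``executemany`` fallback), not pandas' built-in behaviour:

import sqlite3
import pandas as pd

def insert_with_executemany(pd_table, conn, keys, data_iter):
    # pd_table: the table wrapper, conn: the live connection/cursor,
    # keys: column names, data_iter: iterable of row tuples
    placeholders = ', '.join(['?'] * len(keys))
    statement = 'INSERT INTO {} ({}) VALUES ({})'.format(
        pd_table.name, ', '.join(keys), placeholders)
    conn.executemany(statement, list(data_iter))

conn = sqlite3.connect(':memory:')
pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']}).to_sql(
    'demo', conn, index=False, method=insert_with_executemany)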
Please provide a description of the function:def has_table(table_name, con, schema=None):
pandas_sql = pandasSQL_builder(con, schema=schema)
return pandas_sql.has_table(table_name) | [
"\n Check if DataBase has named table.\n\n Parameters\n ----------\n table_name: string\n Name of SQL table.\n con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection\n Using SQLAlchemy makes it possible to use any DB supported by that\n library.\n If a DBAPI2 object, only sqlite3 is supported.\n schema : string, default None\n Name of SQL schema in database to write to (if database flavor supports\n this). If None, use default schema (default).\n\n Returns\n -------\n boolean\n "
] |
Please provide a description of the function:def _engine_builder(con):
global _SQLALCHEMY_INSTALLED
if isinstance(con, str):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con | [
"\n Returns a SQLAlchemy engine from a URI (if con is a string)\n else it just return con without modifying it.\n "
] |
Please provide a description of the function:def pandasSQL_builder(con, schema=None, meta=None,
is_cursor=False):
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, str):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor) | [
"\n Convenience function to return the correct PandasSQL subclass based on the\n provided parameters.\n "
] |
Please provide a description of the function:def get_schema(frame, name, keys=None, con=None, dtype=None):
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype) | [
"\n Get the SQL db table schema for the given frame.\n\n Parameters\n ----------\n frame : DataFrame\n name : string\n name of SQL table\n keys : string or sequence, default: None\n columns to use a primary key\n con: an open SQL database connection object or a SQLAlchemy connectable\n Using SQLAlchemy makes it possible to use any DB supported by that\n library, default: None\n If a DBAPI2 object, only sqlite3 is supported.\n dtype : dict of column name to SQL type, default None\n Optional specifying the datatype for columns. The SQL type should\n be a SQLAlchemy type, or a string for sqlite3 fallback connection.\n\n "
] |
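`get_schema` is a quick way to preview the DDL pandas would emit; with ``con=None`` it falls back to the sqlite3 dialect:

import pandas as pd
from pandas.io import sql

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
print(sql.get_schema(df, 'demo'))   # prints a CREATE TABLE "demo" (...) statement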
Please provide a description of the function:def _execute_insert(self, conn, keys, data_iter):
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(), data) | [
"Execute SQL statement inserting data\n\n Parameters\n ----------\n conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection\n keys : list of str\n Column names\n data_iter : generator of list\n Each item contains a list of values to be inserted\n "
] |
Please provide a description of the function:def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame | [
"Return generator through chunked result set."
] |
Please provide a description of the function:def _harmonize_columns(self, parse_dates=None):
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
except KeyError:
pass | [
"\n Make the DataFrame's column types align with the SQL table\n column types.\n Need to work around limited NA value support. Floats are always\n fine, ints must always be floats if there are Null values.\n Booleans are hard because converting bool column with None replaces\n all Nones with false. Therefore only convert bool if there are no\n NA values.\n Datetimes should already be converted to np.datetime64 if supported,\n but here we also force conversion if required.\n "
] |
Please provide a description of the function:def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize) | [
"Read SQL database table into a DataFrame.\n\n Parameters\n ----------\n table_name : string\n Name of SQL table in database.\n index_col : string, optional, default: None\n Column to set as index.\n coerce_float : boolean, default True\n Attempts to convert values of non-string, non-numeric objects\n (like decimal.Decimal) to floating point. This can result in\n loss of precision.\n parse_dates : list or dict, default: None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg}``, where the arg corresponds\n to the keyword arguments of :func:`pandas.to_datetime`.\n Especially useful with databases without native Datetime support,\n such as SQLite.\n columns : list, default: None\n List of column names to select from SQL table.\n schema : string, default None\n Name of SQL schema in database to query (if database flavor\n supports this). If specified, this overwrites the default\n schema of the SQL database object.\n chunksize : int, default None\n If specified, return an iterator where `chunksize` is the number\n of rows to include in each chunk.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n pandas.read_sql_table\n SQLDatabase.read_query\n\n "
] |
Please provide a description of the function:def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates) | [
"Return generator through chunked result set"
] |
Please provide a description of the function:def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame | [
"Read SQL query into a DataFrame.\n\n Parameters\n ----------\n sql : string\n SQL query to be executed.\n index_col : string, optional, default: None\n Column name to use as index for the returned DataFrame object.\n coerce_float : boolean, default True\n Attempt to convert values of non-string, non-numeric objects (like\n decimal.Decimal) to floating point, useful for SQL result sets.\n params : list, tuple or dict, optional, default: None\n List of parameters to pass to execute method. The syntax used\n to pass parameters is database driver dependent. Check your\n database driver documentation for which of the five syntax styles,\n described in PEP 249's paramstyle, is supported.\n Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}\n parse_dates : list or dict, default: None\n - List of column names to parse as dates.\n - Dict of ``{column_name: format string}`` where format string is\n strftime compatible in case of parsing string times, or is one of\n (D, s, ns, ms, us) in case of parsing integer timestamps.\n - Dict of ``{column_name: arg dict}``, where the arg dict\n corresponds to the keyword arguments of\n :func:`pandas.to_datetime` Especially useful with databases\n without native Datetime support, such as SQLite.\n chunksize : int, default None\n If specified, return an iterator where `chunksize` is the number\n of rows to include in each chunk.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql\n\n "
] |
Please provide a description of the function:def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None,
method=None):
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of {column} is not a '
'SQLAlchemy type '.format(column=col))
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize, method=method)
if (not name.isdigit() and not name.islower()):
# check for potentially case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema,
connection=conn,
)
if name not in table_names:
msg = (
"The provided table name '{0}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
).format(name)
warnings.warn(msg, UserWarning) | [
"\n Write records stored in a DataFrame to a SQL database.\n\n Parameters\n ----------\n frame : DataFrame\n name : string\n Name of SQL table.\n if_exists : {'fail', 'replace', 'append'}, default 'fail'\n - fail: If table exists, do nothing.\n - replace: If table exists, drop it, recreate it, and insert data.\n - append: If table exists, insert data. Create if does not exist.\n index : boolean, default True\n Write DataFrame index as a column.\n index_label : string or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n schema : string, default None\n Name of SQL schema in database to write to (if database flavor\n supports this). If specified, this overwrites the default\n schema of the SQLDatabase object.\n chunksize : int, default None\n If not None, then rows will be written in batches of this size at a\n time. If None, all rows will be written at once.\n dtype : single type or dict of column name to SQL type, default None\n Optional specifying the datatype for columns. The SQL type should\n be a SQLAlchemy type. If all columns are of the same type, one\n single value can be used.\n method : {None', 'multi', callable}, default None\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n "
] |
Please provide a description of the function:def _create_table_setup(self):
column_names_and_types = self._get_column_names_and_types(
self._sql_type_name
)
pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join(escape(c) for c in ix_cols)
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts | [
"\n Return a list of SQL statements that creates a table reflecting the\n structure of a DataFrame. The first entry will be a CREATE TABLE\n statement while the rest will be CREATE INDEX statements.\n "
] |
Please provide a description of the function:def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates) | [
"Return generator through chunked result set"
] |
Please provide a description of the function:def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None,
method=None):
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('{column} ({type!s}) not a string'.format(
column=col, type=my_type))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize, method) | [
"\n Write records stored in a DataFrame to a SQL database.\n\n Parameters\n ----------\n frame: DataFrame\n name: string\n Name of SQL table.\n if_exists: {'fail', 'replace', 'append'}, default 'fail'\n fail: If table exists, do nothing.\n replace: If table exists, drop it, recreate it, and insert data.\n append: If table exists, insert data. Create if it does not exist.\n index : boolean, default True\n Write DataFrame index as a column\n index_label : string or sequence, default None\n Column label for index column(s). If None is given (default) and\n `index` is True, then the index names are used.\n A sequence should be given if the DataFrame uses MultiIndex.\n schema : string, default None\n Ignored parameter included for compatibility with SQLAlchemy\n version of ``to_sql``.\n chunksize : int, default None\n If not None, then rows will be written in batches of this\n size at a time. If None, all rows will be written at once.\n dtype : single type or dict of column name to SQL type, default None\n Optional specifying the datatype for columns. The SQL type should\n be a string. If all columns are of the same type, one single value\n can be used.\n method : {None, 'multi', callable}, default None\n Controls the SQL insertion clause used:\n\n * None : Uses standard SQL ``INSERT`` clause (one per row).\n * 'multi': Pass multiple values in a single ``INSERT`` clause.\n * callable with signature ``(pd_table, conn, keys, data_iter)``.\n\n Details and a sample callable implementation can be found in the\n section :ref:`insert method <io.sql.method>`.\n\n .. versionadded:: 0.24.0\n "
] |
Please provide a description of the function:def _maybe_to_categorical(array):
if isinstance(array, (ABCSeries, ABCCategoricalIndex)):
return array._values
elif isinstance(array, np.ndarray):
return Categorical(array)
return array | [
"\n Coerce to a categorical if a series is given.\n\n Internal use ONLY.\n "
] |
Please provide a description of the function:def contains(cat, key, container):
hash(key)
# get location of key in categories.
# If a KeyError, the key isn't in categories, so logically
# can't be in container either.
try:
loc = cat.categories.get_loc(key)
except KeyError:
return False
# loc is the location of key in categories, but also the *value*
# for key in container. So, `key` may be in categories,
# but still not in `container`. Example ('b' in categories,
# but not in values):
# 'b' in Categorical(['a'], categories=['a', 'b']) # False
if is_scalar(loc):
return loc in container
else:
# if categories is an IntervalIndex, loc is an array.
return any(loc_ in container for loc_ in loc) | [
"\n Helper for membership check for ``key`` in ``cat``.\n\n This is a helper method for :method:`__contains__`\n and :class:`CategoricalIndex.__contains__`.\n\n Returns True if ``key`` is in ``cat.categories`` and the\n location of ``key`` in ``categories`` is in ``container``.\n\n Parameters\n ----------\n cat : :class:`Categorical`or :class:`categoricalIndex`\n key : a hashable object\n The key to check membership for.\n container : Container (e.g. list-like or mapping)\n The container to check for membership in.\n\n Returns\n -------\n is_in : bool\n True if ``key`` is in ``self.categories`` and location of\n ``key`` in ``categories`` is in ``container``, else False.\n\n Notes\n -----\n This method does not check for NaN values. Do that separately\n before calling this method.\n "
] |
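In practice this helper backs membership tests on a Categorical: the key must be a known category and its code must actually occur in the values.

import pandas as pd

cat = pd.Categorical(['a', 'a'], categories=['a', 'b'])
print('a' in cat)   # True  - a category that occurs in the values
print('b' in cat)   # False - a category, but never used
print('c' in cat)   # False - not a category at all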
Please provide a description of the function:def _get_codes_for_values(values, categories):
from pandas.core.algorithms import _get_data_algo, _hashtables
dtype_equal = is_dtype_equal(values.dtype, categories.dtype)
if dtype_equal:
# To prevent erroneous dtype coercion in _get_data_algo, retrieve
# the underlying numpy array. gh-22702
values = getattr(values, '_ndarray_values', values)
categories = getattr(categories, '_ndarray_values', categories)
elif (is_extension_array_dtype(categories.dtype) and
is_object_dtype(values)):
# Support inferring the correct extension dtype from an array of
# scalar objects. e.g.
# Categorical(array[Period, Period], categories=PeriodIndex(...))
try:
values = (
categories.dtype.construct_array_type()._from_sequence(values)
)
except Exception:
# but that may fail for any reason, so fall back to object
values = ensure_object(values)
categories = ensure_object(categories)
else:
values = ensure_object(values)
categories = ensure_object(categories)
(hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables)
(_, _), cats = _get_data_algo(categories, _hashtables)
t = hash_klass(len(cats))
t.map_locations(cats)
return coerce_indexer_dtype(t.lookup(vals), cats) | [
"\n utility routine to turn values into codes given the specified categories\n "
] |
Please provide a description of the function:def _recode_for_categories(codes, old_categories, new_categories):
from pandas.core.algorithms import take_1d
if len(old_categories) == 0:
# All null anyway, so just retain the nulls
return codes.copy()
elif new_categories.equals(old_categories):
# Same categories, so no need to actually recode
return codes.copy()
indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories),
new_categories)
new_codes = take_1d(indexer, codes.copy(), fill_value=-1)
return new_codes | [
"\n Convert a set of codes for to a new set of categories\n\n Parameters\n ----------\n codes : array\n old_categories, new_categories : Index\n\n Returns\n -------\n new_codes : array\n\n Examples\n --------\n >>> old_cat = pd.Index(['b', 'a', 'c'])\n >>> new_cat = pd.Index(['a', 'b'])\n >>> codes = np.array([0, 1, 1, 2])\n >>> _recode_for_categories(codes, old_cat, new_cat)\n array([ 1, 0, 0, -1])\n "
] |
Please provide a description of the function:def _factorize_from_iterable(values):
from pandas.core.indexes.category import CategoricalIndex
if not is_list_like(values):
raise TypeError("Input must be list-like")
if is_categorical(values):
if isinstance(values, (ABCCategoricalIndex, ABCSeries)):
values = values._values
categories = CategoricalIndex(values.categories, dtype=values.dtype)
codes = values.codes
else:
# The value of ordered is irrelevant since we don't use cat as such,
# but only the resulting categories, the order of which is independent
# from ordered. Set ordered to False as default. See GH #15457
cat = Categorical(values, ordered=False)
categories = cat.categories
codes = cat.codes
return codes, categories | [
"\n Factorize an input `values` into `categories` and `codes`. Preserves\n categorical dtype in `categories`.\n\n *This is an internal function*\n\n Parameters\n ----------\n values : list-like\n\n Returns\n -------\n codes : ndarray\n categories : Index\n If `values` has a categorical dtype, then `categories` is\n a CategoricalIndex keeping the categories and order of `values`.\n "
] |
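The function itself is internal; the closest public entry point is ``pd.factorize``, which exposes the same codes/uniques split:

import pandas as pd

codes, uniques = pd.factorize(['b', 'a', 'b', 'c'])
print(codes)     # [0 1 0 2]
print(uniques)   # ['b' 'a' 'c']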
Please provide a description of the function:def _factorize_from_iterables(iterables):
if len(iterables) == 0:
# For consistency, it should return a list of 2 lists.
return [[], []]
return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables])) | [
"\n A higher-level wrapper over `_factorize_from_iterable`.\n\n *This is an internal function*\n\n Parameters\n ----------\n iterables : list-like of list-likes\n\n Returns\n -------\n codes_list : list of ndarrays\n categories_list : list of Indexes\n\n Notes\n -----\n See `_factorize_from_iterable` for more info.\n "
] |
Please provide a description of the function:def copy(self):
return self._constructor(values=self._codes.copy(),
dtype=self.dtype,
fastpath=True) | [
"\n Copy constructor.\n "
] |
Please provide a description of the function:def astype(self, dtype, copy=True):
if is_categorical_dtype(dtype):
# GH 10696/18593
dtype = self.dtype.update_dtype(dtype)
self = self.copy() if copy else self
if dtype == self.dtype:
return self
return self._set_dtype(dtype)
return np.array(self, dtype=dtype, copy=copy) | [
"\n Coerce this type to another dtype\n\n Parameters\n ----------\n dtype : numpy dtype or pandas type\n copy : bool, default True\n By default, astype always returns a newly allocated object.\n If copy is set to False and dtype is categorical, the original\n object is returned.\n\n .. versionadded:: 0.19.0\n\n "
] |
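A short illustration of the two branches: casting to another categorical dtype re-codes the existing data, while any other target dtype materialises a plain numpy array.

import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'])
print(cat.astype(object))          # ['a' 'b' 'a'] as an object ndarray
ordered = cat.astype(pd.CategoricalDtype(['a', 'b'], ordered=True))
print(ordered.ordered)             # True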
Please provide a description of the function:def _from_inferred_categories(cls, inferred_categories, inferred_codes,
dtype, true_values=None):
from pandas import Index, to_numeric, to_datetime, to_timedelta
cats = Index(inferred_categories)
known_categories = (isinstance(dtype, CategoricalDtype) and
dtype.categories is not None)
if known_categories:
# Convert to a specialized type with `dtype` if specified.
if dtype.categories.is_numeric():
cats = to_numeric(inferred_categories, errors="coerce")
elif is_datetime64_dtype(dtype.categories):
cats = to_datetime(inferred_categories, errors="coerce")
elif is_timedelta64_dtype(dtype.categories):
cats = to_timedelta(inferred_categories, errors="coerce")
elif dtype.categories.is_boolean():
if true_values is None:
true_values = ["True", "TRUE", "true"]
cats = cats.isin(true_values)
if known_categories:
# Recode from observation order to dtype.categories order.
categories = dtype.categories
codes = _recode_for_categories(inferred_codes, cats, categories)
elif not cats.is_monotonic_increasing:
# Sort categories and recode for unknown categories.
unsorted = cats.copy()
categories = cats.sort_values()
codes = _recode_for_categories(inferred_codes, unsorted,
categories)
dtype = CategoricalDtype(categories, ordered=False)
else:
dtype = CategoricalDtype(cats, ordered=False)
codes = inferred_codes
return cls(codes, dtype=dtype, fastpath=True) | [
"\n Construct a Categorical from inferred values.\n\n For inferred categories (`dtype` is None) the categories are sorted.\n For explicit `dtype`, the `inferred_categories` are cast to the\n appropriate type.\n\n Parameters\n ----------\n inferred_categories : Index\n inferred_codes : Index\n dtype : CategoricalDtype or 'category'\n true_values : list, optional\n If none are provided, the default ones are\n \"True\", \"TRUE\", and \"true.\"\n\n Returns\n -------\n Categorical\n "
] |
Please provide a description of the function:def from_codes(cls, codes, categories=None, ordered=None, dtype=None):
dtype = CategoricalDtype._from_values_or_dtype(categories=categories,
ordered=ordered,
dtype=dtype)
if dtype.categories is None:
msg = ("The categories must be provided in 'categories' or "
"'dtype'. Both were None.")
raise ValueError(msg)
codes = np.asarray(codes) # #21767
if not is_integer_dtype(codes):
msg = "codes need to be array-like integers"
if is_float_dtype(codes):
icodes = codes.astype('i8')
if (icodes == codes).all():
msg = None
codes = icodes
warn(("float codes will be disallowed in the future and "
"raise a ValueError"), FutureWarning, stacklevel=2)
if msg:
raise ValueError(msg)
if len(codes) and (
codes.max() >= len(dtype.categories) or codes.min() < -1):
raise ValueError("codes need to be between -1 and "
"len(categories)-1")
return cls(codes, dtype=dtype, fastpath=True) | [
"\n Make a Categorical type from codes and categories or dtype.\n\n This constructor is useful if you already have codes and\n categories/dtype and so do not need the (computation intensive)\n factorization step, which is usually done on the constructor.\n\n If your data does not follow this convention, please use the normal\n constructor.\n\n Parameters\n ----------\n codes : array-like, integers\n An integer array, where each integer points to a category in\n categories or dtype.categories, or else is -1 for NaN.\n categories : index-like, optional\n The categories for the categorical. Items need to be unique.\n If the categories are not given here, then they must be provided\n in `dtype`.\n ordered : bool, optional\n Whether or not this categorical is treated as an ordered\n categorical. If not given here or in `dtype`, the resulting\n categorical will be unordered.\n dtype : CategoricalDtype or the string \"category\", optional\n If :class:`CategoricalDtype`, cannot be used together with\n `categories` or `ordered`.\n\n .. versionadded:: 0.24.0\n\n When `dtype` is provided, neither `categories` nor `ordered`\n should be provided.\n\n Examples\n --------\n >>> dtype = pd.CategoricalDtype(['a', 'b'], ordered=True)\n >>> pd.Categorical.from_codes(codes=[0, 1, 0, 1], dtype=dtype)\n [a, b, a, b]\n Categories (2, object): [a < b]\n "
] |
Please provide a description of the function:def _get_codes(self):
v = self._codes.view()
v.flags.writeable = False
return v | [
"\n Get the codes.\n\n Returns\n -------\n codes : integer array view\n A non writable view of the `codes` array.\n "
] |
Please provide a description of the function:def _set_categories(self, categories, fastpath=False):
if fastpath:
new_dtype = CategoricalDtype._from_fastpath(categories,
self.ordered)
else:
new_dtype = CategoricalDtype(categories, ordered=self.ordered)
if (not fastpath and self.dtype.categories is not None and
len(new_dtype.categories) != len(self.dtype.categories)):
raise ValueError("new categories need to have the same number of "
"items than the old categories!")
self._dtype = new_dtype | [
"\n Sets new categories inplace\n\n Parameters\n ----------\n fastpath : bool, default False\n Don't perform validation of the categories for uniqueness or nulls\n\n Examples\n --------\n >>> c = pd.Categorical(['a', 'b'])\n >>> c\n [a, b]\n Categories (2, object): [a, b]\n\n >>> c._set_categories(pd.Index(['a', 'c']))\n >>> c\n [a, c]\n Categories (2, object): [a, c]\n "
] |
Please provide a description of the function:def _set_dtype(self, dtype):
codes = _recode_for_categories(self.codes, self.categories,
dtype.categories)
return type(self)(codes, dtype=dtype, fastpath=True) | [
"\n Internal method for directly updating the CategoricalDtype\n\n Parameters\n ----------\n dtype : CategoricalDtype\n\n Notes\n -----\n We don't do any validation here. It's assumed that the dtype is\n a (valid) instance of `CategoricalDtype`.\n "
] |
Please provide a description of the function:def set_ordered(self, value, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
new_dtype = CategoricalDtype(self.categories, ordered=value)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
if not inplace:
return cat | [
"\n Set the ordered attribute to the boolean value.\n\n Parameters\n ----------\n value : bool\n Set whether this categorical is ordered (True) or not (False).\n inplace : bool, default False\n Whether or not to set the ordered attribute in-place or return\n a copy of this categorical with ordered set to the value.\n "
] |
Please provide a description of the function:def as_ordered(self, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(True, inplace=inplace) | [
"\n Set the Categorical to be ordered.\n\n Parameters\n ----------\n inplace : bool, default False\n Whether or not to set the ordered attribute in-place or return\n a copy of this categorical with ordered set to True.\n "
] |
Please provide a description of the function:def as_unordered(self, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
return self.set_ordered(False, inplace=inplace) | [
"\n Set the Categorical to be unordered.\n\n Parameters\n ----------\n inplace : bool, default False\n Whether or not to set the ordered attribute in-place or return\n a copy of this categorical with ordered set to False.\n "
] |
Please provide a description of the function:def set_categories(self, new_categories, ordered=None, rename=False,
inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if ordered is None:
ordered = self.dtype.ordered
new_dtype = CategoricalDtype(new_categories, ordered=ordered)
cat = self if inplace else self.copy()
if rename:
if (cat.dtype.categories is not None and
len(new_dtype.categories) < len(cat.dtype.categories)):
# remove all _codes which are larger and set to -1/NaN
cat._codes[cat._codes >= len(new_dtype.categories)] = -1
else:
codes = _recode_for_categories(cat.codes, cat.categories,
new_dtype.categories)
cat._codes = codes
cat._dtype = new_dtype
if not inplace:
return cat | [
"\n Set the categories to the specified new_categories.\n\n `new_categories` can include new categories (which will result in\n unused categories) or remove old categories (which results in values\n set to NaN). If `rename==True`, the categories will simple be renamed\n (less or more items than in old categories will result in values set to\n NaN or in unused categories respectively).\n\n This method can be used to perform more than one action of adding,\n removing, and reordering simultaneously and is therefore faster than\n performing the individual steps via the more specialised methods.\n\n On the other hand this methods does not do checks (e.g., whether the\n old categories are included in the new categories on a reorder), which\n can result in surprising changes, for example when using special string\n dtypes on python3, which does not considers a S1 string equal to a\n single char python string.\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : bool, default False\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n rename : bool, default False\n Whether or not the new_categories should be considered as a rename\n of the old categories or as reordered categories.\n inplace : bool, default False\n Whether or not to reorder the categories in-place or return a copy\n of this categorical with reordered categories.\n\n Returns\n -------\n Categorical with reordered categories or None if inplace.\n\n Raises\n ------\n ValueError\n If new_categories does not validate as categories\n\n See Also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n "
] |
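A small example of the combined behaviour: removed categories turn the corresponding values into NaN, newly added ones are simply unused.

import pandas as pd

cat = pd.Categorical(['a', 'b', 'c'])
recat = cat.set_categories(['b', 'c', 'd'])
print(recat)
# [NaN, b, c]
# Categories (3, object): [b, c, d]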
Please provide a description of the function:def rename_categories(self, new_categories, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
if isinstance(new_categories, ABCSeries):
msg = ("Treating Series 'new_categories' as a list-like and using "
"the values. In a future version, 'rename_categories' will "
"treat Series like a dictionary.\n"
"For dict-like, use 'new_categories.to_dict()'\n"
"For list-like, use 'new_categories.values'.")
warn(msg, FutureWarning, stacklevel=2)
new_categories = list(new_categories)
if is_dict_like(new_categories):
cat.categories = [new_categories.get(item, item)
for item in cat.categories]
elif callable(new_categories):
cat.categories = [new_categories(item) for item in cat.categories]
else:
cat.categories = new_categories
if not inplace:
return cat | [
"\n Rename categories.\n\n Parameters\n ----------\n new_categories : list-like, dict-like or callable\n\n * list-like: all items must be unique and the number of items in\n the new categories must match the existing number of categories.\n\n * dict-like: specifies a mapping from\n old categories to new. Categories not contained in the mapping\n are passed through and extra categories in the mapping are\n ignored.\n\n .. versionadded:: 0.21.0\n\n * callable : a callable that is called on all items in the old\n categories and whose return values comprise the new categories.\n\n .. versionadded:: 0.23.0\n\n .. warning::\n\n Currently, Series are considered list like. In a future version\n of pandas they'll be considered dict-like.\n\n inplace : bool, default False\n Whether or not to rename the categories inplace or return a copy of\n this categorical with renamed categories.\n\n Returns\n -------\n cat : Categorical or None\n With ``inplace=False``, the new categorical is returned.\n With ``inplace=True``, there is no return value.\n\n Raises\n ------\n ValueError\n If new categories are list-like and do not have the same number of\n items than the current categories or do not validate as categories\n\n See Also\n --------\n reorder_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n\n Examples\n --------\n >>> c = pd.Categorical(['a', 'a', 'b'])\n >>> c.rename_categories([0, 1])\n [0, 0, 1]\n Categories (2, int64): [0, 1]\n\n For dict-like ``new_categories``, extra keys are ignored and\n categories not in the dictionary are passed through\n\n >>> c.rename_categories({'a': 'A', 'c': 'C'})\n [A, A, b]\n Categories (2, object): [A, b]\n\n You may also provide a callable to create the new categories\n\n >>> c.rename_categories(lambda x: x.upper())\n [A, A, B]\n Categories (2, object): [A, B]\n "
] |
Please provide a description of the function:def reorder_categories(self, new_categories, ordered=None, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if set(self.dtype.categories) != set(new_categories):
raise ValueError("items in new_categories are not the same as in "
"old categories")
return self.set_categories(new_categories, ordered=ordered,
inplace=inplace) | [
"\n Reorder categories as specified in new_categories.\n\n `new_categories` need to include all old categories and no new category\n items.\n\n Parameters\n ----------\n new_categories : Index-like\n The categories in new order.\n ordered : bool, optional\n Whether or not the categorical is treated as a ordered categorical.\n If not given, do not change the ordered information.\n inplace : bool, default False\n Whether or not to reorder the categories inplace or return a copy of\n this categorical with reordered categories.\n\n Returns\n -------\n cat : Categorical with reordered categories or None if inplace.\n\n Raises\n ------\n ValueError\n If the new categories do not contain all old category items or any\n new ones\n\n See Also\n --------\n rename_categories\n add_categories\n remove_categories\n remove_unused_categories\n set_categories\n "
] |
Please provide a description of the function:def add_categories(self, new_categories, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(new_categories):
new_categories = [new_categories]
already_included = set(new_categories) & set(self.dtype.categories)
if len(already_included) != 0:
msg = ("new categories must not include old categories: "
"{already_included!s}")
raise ValueError(msg.format(already_included=already_included))
new_categories = list(self.dtype.categories) + list(new_categories)
new_dtype = CategoricalDtype(new_categories, self.ordered)
cat = self if inplace else self.copy()
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories)
if not inplace:
return cat | [
"\n Add new categories.\n\n `new_categories` will be included at the last/highest place in the\n categories and will be unused directly after this call.\n\n Parameters\n ----------\n new_categories : category or list-like of category\n The new categories to be included.\n inplace : bool, default False\n Whether or not to add the categories inplace or return a copy of\n this categorical with added categories.\n\n Returns\n -------\n cat : Categorical with new categories added or None if inplace.\n\n Raises\n ------\n ValueError\n If the new categories include old categories or do not validate as\n categories\n\n See Also\n --------\n rename_categories\n reorder_categories\n remove_categories\n remove_unused_categories\n set_categories\n "
] |
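A quick illustration; passing a category that already exists raises ValueError instead.

import pandas as pd

cat = pd.Categorical(['a', 'b'])
wider = cat.add_categories(['c'])
print(wider.categories.tolist())   # ['a', 'b', 'c'] - 'c' is present but unused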
Please provide a description of the function:def remove_categories(self, removals, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_list_like(removals):
removals = [removals]
removal_set = set(list(removals))
not_included = removal_set - set(self.dtype.categories)
new_categories = [c for c in self.dtype.categories
if c not in removal_set]
# GH 10156
if any(isna(removals)):
not_included = [x for x in not_included if notna(x)]
new_categories = [x for x in new_categories if notna(x)]
if len(not_included) != 0:
msg = "removals must all be in old categories: {not_included!s}"
raise ValueError(msg.format(not_included=not_included))
return self.set_categories(new_categories, ordered=self.ordered,
rename=False, inplace=inplace) | [
"\n Remove the specified categories.\n\n `removals` must be included in the old categories. Values which were in\n the removed categories will be set to NaN\n\n Parameters\n ----------\n removals : category or list of categories\n The categories which should be removed.\n inplace : bool, default False\n Whether or not to remove the categories inplace or return a copy of\n this categorical with removed categories.\n\n Returns\n -------\n cat : Categorical with removed categories or None if inplace.\n\n Raises\n ------\n ValueError\n If the removals are not contained in the categories\n\n See Also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_unused_categories\n set_categories\n "
] |
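A brief sketch of the NaN behaviour described in the remove_categories entry above (illustrative values):

import pandas as pd

c = pd.Categorical(['a', 'b', 'c', 'a'])
trimmed = c.remove_categories('c')
# trimmed -> [a, b, NaN, a]; trimmed.categories -> Index(['a', 'b'])
# Removing a label that was never a category raises ValueError instead.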
Please provide a description of the function:def remove_unused_categories(self, inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
cat = self if inplace else self.copy()
idx, inv = np.unique(cat._codes, return_inverse=True)
if idx.size != 0 and idx[0] == -1: # na sentinel
idx, inv = idx[1:], inv - 1
new_categories = cat.dtype.categories.take(idx)
new_dtype = CategoricalDtype._from_fastpath(new_categories,
ordered=self.ordered)
cat._dtype = new_dtype
cat._codes = coerce_indexer_dtype(inv, new_dtype.categories)
if not inplace:
return cat | [
"\n Remove categories which are not used.\n\n Parameters\n ----------\n inplace : bool, default False\n Whether or not to drop unused categories inplace or return a copy of\n this categorical with unused categories dropped.\n\n Returns\n -------\n cat : Categorical with unused categories dropped or None if inplace.\n\n See Also\n --------\n rename_categories\n reorder_categories\n add_categories\n remove_categories\n set_categories\n "
] |
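An illustrative sketch for remove_unused_categories (the data is made up for the example):

import pandas as pd

c = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c', 'd'])
slim = c.remove_unused_categories()
# slim.categories -> Index(['a', 'b']); the values themselves are untouched.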
Please provide a description of the function:def map(self, mapper):
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories),
np.nan)
return np.take(new_categories, self._codes) | [
"\n Map categories using input correspondence (dict, Series, or function).\n\n Maps the categories to new categories. If the mapping correspondence is\n one-to-one the result is a :class:`~pandas.Categorical` which has the\n same order property as the original, otherwise a :class:`~pandas.Index`\n is returned. NaN values are unaffected.\n\n If a `dict` or :class:`~pandas.Series` is used any unmapped category is\n mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`\n will be returned.\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n\n Returns\n -------\n pandas.Categorical or pandas.Index\n Mapped categorical.\n\n See Also\n --------\n CategoricalIndex.map : Apply a mapping correspondence on a\n :class:`~pandas.CategoricalIndex`.\n Index.map : Apply a mapping correspondence on an\n :class:`~pandas.Index`.\n Series.map : Apply a mapping correspondence on a\n :class:`~pandas.Series`.\n Series.apply : Apply more complex functions on a\n :class:`~pandas.Series`.\n\n Examples\n --------\n >>> cat = pd.Categorical(['a', 'b', 'c'])\n >>> cat\n [a, b, c]\n Categories (3, object): [a, b, c]\n >>> cat.map(lambda x: x.upper())\n [A, B, C]\n Categories (3, object): [A, B, C]\n >>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})\n [first, second, third]\n Categories (3, object): [first, second, third]\n\n If the mapping is one-to-one the ordering of the categories is\n preserved:\n\n >>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)\n >>> cat\n [a, b, c]\n Categories (3, object): [a < b < c]\n >>> cat.map({'a': 3, 'b': 2, 'c': 1})\n [3, 2, 1]\n Categories (3, int64): [3 < 2 < 1]\n\n If the mapping is not one-to-one an :class:`~pandas.Index` is returned:\n\n >>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})\n Index(['first', 'second', 'first'], dtype='object')\n\n If a `dict` is used, all unmapped categories are mapped to `NaN` and\n the result is an :class:`~pandas.Index`:\n\n >>> cat.map({'a': 'first', 'b': 'second'})\n Index(['first', 'second', nan], dtype='object')\n "
] |
Please provide a description of the function:def shift(self, periods, fill_value=None):
# since categoricals always have ndim == 1, an axis parameter
# doesn't make any sense here.
codes = self.codes
if codes.ndim > 1:
raise NotImplementedError("Categorical with ndim > 1.")
if np.prod(codes.shape) and (periods != 0):
codes = np.roll(codes, ensure_platform_int(periods), axis=0)
if isna(fill_value):
fill_value = -1
elif fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
raise ValueError("'fill_value={}' is not present "
"in this Categorical's "
"categories".format(fill_value))
if periods > 0:
codes[:periods] = fill_value
else:
codes[periods:] = fill_value
return self.from_codes(codes, dtype=self.dtype) | [
"\n Shift Categorical by desired number of periods.\n\n Parameters\n ----------\n periods : int\n Number of periods to move, can be positive or negative\n fill_value : object, optional\n The scalar value to use for newly introduced missing values.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n shifted : Categorical\n "
] |
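A small sketch of shift; per the code above, fill_value must be an existing category, and the commented results are the expected outcomes rather than captured output.

import pandas as pd

c = pd.Categorical(['a', 'b', 'c'])
c.shift(1)                  # -> [NaN, a, b]
c.shift(1, fill_value='a')  # -> [a, a, b]
c.shift(-1)                 # -> [b, c, NaN]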
Please provide a description of the function:def memory_usage(self, deep=False):
return self._codes.nbytes + self.dtype.categories.memory_usage(
deep=deep) | [
"\n Memory usage of my values\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption\n\n Returns\n -------\n bytes used\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False\n\n See Also\n --------\n numpy.ndarray.nbytes\n "
] |
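A quick sketch of how memory_usage might be called; the actual byte counts are platform-dependent, so none are shown.

import pandas as pd

c = pd.Categorical(['a', 'a', 'b'] * 1000)
c.memory_usage()           # nbytes of the integer codes plus shallow categories usage
c.memory_usage(deep=True)  # additionally counts the Python objects behind object-dtype categories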
Please provide a description of the function:def value_counts(self, dropna=True):
from numpy import bincount
from pandas import Series, CategoricalIndex
code, cat = self._codes, self.categories
ncat, mask = len(cat), 0 <= code
ix, clean = np.arange(ncat), mask.all()
if dropna or clean:
obs = code if clean else code[mask]
count = bincount(obs, minlength=ncat or None)
else:
count = bincount(np.where(mask, code, ncat))
ix = np.append(ix, -1)
ix = self._constructor(ix, dtype=self.dtype,
fastpath=True)
return Series(count, index=CategoricalIndex(ix), dtype='int64') | [
"\n Return a Series containing counts of each category.\n\n Every category will have an entry, even those with a count of 0.\n\n Parameters\n ----------\n dropna : bool, default True\n Don't include counts of NaN.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.value_counts\n\n "
] |
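An illustrative sketch of value_counts with an unused category and a missing value (hypothetical data):

import numpy as np
import pandas as pd

c = pd.Categorical(['a', 'b', 'a', np.nan], categories=['a', 'b', 'c'])
c.value_counts()              # unused category 'c' is reported with a count of 0; the NaN is dropped
c.value_counts(dropna=False)  # adds a NaN row counting the missing value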
Please provide a description of the function:def get_values(self):
# if we are a datetime and period index, return Index to keep metadata
if is_datetimelike(self.categories):
return self.categories.take(self._codes, fill_value=np.nan)
elif is_integer_dtype(self.categories) and -1 in self._codes:
return self.categories.astype("object").take(self._codes,
fill_value=np.nan)
return np.array(self) | [
"\n Return the values.\n\n For internal compatibility with pandas formatting.\n\n Returns\n -------\n numpy.array\n A numpy array of the same dtype as categorical.categories.dtype or\n Index if datetime / periods.\n "
] |
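A sketch of what the two branches above mean in practice (illustrative values); the datetimelike branch returns an Index so that metadata such as timezone survives.

import pandas as pd

c = pd.Categorical(['a', 'b', 'a'])
c.get_values()   # plain object-dtype ndarray

dt = pd.Categorical(pd.to_datetime(['2019-01-01', '2019-01-02']))
dt.get_values()  # DatetimeIndex, via the datetimelike branch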
Please provide a description of the function:def sort_values(self, inplace=False, ascending=True, na_position='last'):
inplace = validate_bool_kwarg(inplace, 'inplace')
if na_position not in ['last', 'first']:
msg = 'invalid na_position: {na_position!r}'
raise ValueError(msg.format(na_position=na_position))
sorted_idx = nargsort(self,
ascending=ascending,
na_position=na_position)
if inplace:
self._codes = self._codes[sorted_idx]
else:
return self._constructor(values=self._codes[sorted_idx],
dtype=self.dtype,
fastpath=True) | [
"\n Sort the Categorical by category value returning a new\n Categorical by default.\n\n While an ordering is applied to the category values, sorting in this\n context refers more to organizing and grouping together based on\n matching category values. Thus, this function can be called on an\n unordered Categorical instance unlike the functions 'Categorical.min'\n and 'Categorical.max'.\n\n Parameters\n ----------\n inplace : bool, default False\n Do operation in place.\n ascending : bool, default True\n Order ascending. Passing False orders descending. The\n ordering parameter provides the method by which the\n category values are organized.\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n\n Returns\n -------\n Categorical or None\n\n See Also\n --------\n Categorical.sort\n Series.sort_values\n\n Examples\n --------\n >>> c = pd.Categorical([1, 2, 2, 1, 5])\n >>> c\n [1, 2, 2, 1, 5]\n Categories (3, int64): [1, 2, 5]\n >>> c.sort_values()\n [1, 1, 2, 2, 5]\n Categories (3, int64): [1, 2, 5]\n >>> c.sort_values(ascending=False)\n [5, 2, 2, 1, 1]\n Categories (3, int64): [1, 2, 5]\n\n Inplace sorting can be done as well:\n\n >>> c.sort_values(inplace=True)\n >>> c\n [1, 1, 2, 2, 5]\n Categories (3, int64): [1, 2, 5]\n >>>\n >>> c = pd.Categorical([1, 2, 2, 1, 5])\n\n 'sort_values' behaviour with NaNs. Note that 'na_position'\n is independent of the 'ascending' parameter:\n\n >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])\n >>> c\n [NaN, 2.0, 2.0, NaN, 5.0]\n Categories (2, int64): [2, 5]\n >>> c.sort_values()\n [2.0, 2.0, 5.0, NaN, NaN]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(ascending=False)\n [5.0, 2.0, 2.0, NaN, NaN]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(na_position='first')\n [NaN, NaN, 2.0, 2.0, 5.0]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(ascending=False, na_position='first')\n [NaN, NaN, 5.0, 2.0, 2.0]\n Categories (2, int64): [2, 5]\n "
] |
Please provide a description of the function:def _values_for_rank(self):
from pandas import Series
if self.ordered:
values = self.codes
mask = values == -1
if mask.any():
values = values.astype('float64')
values[mask] = np.nan
elif self.categories.is_numeric():
values = np.array(self)
else:
# reorder the categories (so rank can use the float codes)
# instead of passing an object array to rank
values = np.array(
self.rename_categories(Series(self.categories).rank().values)
)
return values | [
"\n For correctly ranking ordered categorical data. See GH#15420\n\n Ordered categorical data should be ranked on the basis of\n codes with -1 translated to NaN.\n\n Returns\n -------\n numpy.array\n\n "
] |
Please provide a description of the function:def fillna(self, value=None, method=None, limit=None):
value, method = validate_fillna_kwargs(
value, method, validate_scalar_dict_value=False
)
if value is None:
value = np.nan
if limit is not None:
raise NotImplementedError("specifying a limit for fillna has not "
"been implemented yet")
codes = self._codes
# pad / bfill
if method is not None:
values = self.to_dense().reshape(-1, len(self))
values = interpolate_2d(values, method, 0, None,
value).astype(self.categories.dtype)[0]
codes = _get_codes_for_values(values, self.categories)
else:
# If value is a dict or a Series (a dict value has already
# been converted to a Series)
if isinstance(value, ABCSeries):
if not value[~value.isin(self.categories)].isna().all():
raise ValueError("fill value must be in categories")
values_codes = _get_codes_for_values(value, self.categories)
indexer = np.where(values_codes != -1)
codes[indexer] = values_codes[values_codes != -1]
# If value is not a dict or Series it should be a scalar
elif is_hashable(value):
if not isna(value) and value not in self.categories:
raise ValueError("fill value must be in categories")
mask = codes == -1
if mask.any():
codes = codes.copy()
if isna(value):
codes[mask] = -1
else:
codes[mask] = self.categories.get_loc(value)
else:
raise TypeError('"value" parameter must be a scalar, dict '
'or Series, but you passed a '
'"{0}"'.format(type(value).__name__))
return self._constructor(codes, dtype=self.dtype, fastpath=True) | [
"\n Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series\n If a scalar value is passed it is used to fill all missing values.\n Alternatively, a Series or dict can be used to fill in different\n values for each index. The value should not be a list. The\n value(s) passed should either be in the categories or should be\n NaN.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n (Not implemented yet for Categorical!)\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : Categorical with NA/NaN filled\n "
] |
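A short sketch of the scalar and method paths of fillna (illustrative values; the fill value must already be one of the categories):

import numpy as np
import pandas as pd

c = pd.Categorical(['a', np.nan, 'b', np.nan], categories=['a', 'b'])
c.fillna('a')             # -> [a, a, b, a]
c.fillna(method='ffill')  # -> [a, a, b, b]
# c.fillna('z') would raise ValueError because 'z' is not a category.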
Please provide a description of the function:def take_nd(self, indexer, allow_fill=None, fill_value=None):
indexer = np.asarray(indexer, dtype=np.intp)
if allow_fill is None:
if (indexer < 0).any():
warn(_take_msg, FutureWarning, stacklevel=2)
allow_fill = True
dtype = self.dtype
if isna(fill_value):
fill_value = -1
elif allow_fill:
# convert user-provided `fill_value` to codes
if fill_value in self.categories:
fill_value = self.categories.get_loc(fill_value)
else:
msg = (
"'fill_value' ('{}') is not in this Categorical's "
"categories."
)
raise TypeError(msg.format(fill_value))
codes = take(self._codes, indexer, allow_fill=allow_fill,
fill_value=fill_value)
result = type(self).from_codes(codes, dtype=dtype)
return result | [
"\n Take elements from the Categorical.\n\n Parameters\n ----------\n indexer : sequence of int\n The indices in `self` to take. The meaning of negative values in\n `indexer` depends on the value of `allow_fill`.\n allow_fill : bool, default None\n How to handle negative values in `indexer`.\n\n * False: negative values in `indices` indicate positional indices\n from the right. This is similar to\n :func:`numpy.take`.\n\n * True: negative values in `indices` indicate missing values\n (the default). These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n\n .. versionchanged:: 0.23.0\n\n Deprecated the default value of `allow_fill`. The deprecated\n default is ``True``. In the future, this will change to\n ``False``.\n\n fill_value : object\n The value to use for `indices` that are missing (-1), when\n ``allow_fill=True``. This should be the category, i.e. a value\n in ``self.categories``, not a code.\n\n Returns\n -------\n Categorical\n This Categorical will have the same categories and ordered as\n `self`.\n\n See Also\n --------\n Series.take : Similar method for Series.\n numpy.ndarray.take : Similar method for NumPy arrays.\n\n Examples\n --------\n >>> cat = pd.Categorical(['a', 'a', 'b'])\n >>> cat\n [a, a, b]\n Categories (2, object): [a, b]\n\n Specify ``allow_fill==False`` to have negative indices mean indexing\n from the right.\n\n >>> cat.take([0, -1, -2], allow_fill=False)\n [a, b, a]\n Categories (2, object): [a, b]\n\n With ``allow_fill=True``, indices equal to ``-1`` mean \"missing\"\n values that should be filled with the `fill_value`, which is\n ``np.nan`` by default.\n\n >>> cat.take([0, -1, -1], allow_fill=True)\n [a, NaN, NaN]\n Categories (2, object): [a, b]\n\n The fill value can be specified.\n\n >>> cat.take([0, -1, -1], allow_fill=True, fill_value='a')\n [a, a, a]\n Categories (3, object): [a, b]\n\n Specifying a fill value that's not in ``self.categories``\n will raise a ``TypeError``.\n "
] |
Please provide a description of the function:def _slice(self, slicer):
# only allow 1 dimensional slicing, but can
# in a 2-d case be passed (slice(None), ...)
if isinstance(slicer, tuple) and len(slicer) == 2:
if not com.is_null_slice(slicer[0]):
raise AssertionError("invalid slicing for a 1-ndim "
"categorical")
slicer = slicer[1]
codes = self._codes[slicer]
return self._constructor(values=codes, dtype=self.dtype, fastpath=True) | [
"\n Return a slice of myself.\n\n For internal compatibility with numpy arrays.\n "
] |
Please provide a description of the function:def _tidy_repr(self, max_vals=10, footer=True):
num = max_vals // 2
head = self[:num]._get_repr(length=False, footer=False)
tail = self[-(max_vals - num):]._get_repr(length=False, footer=False)
result = '{head}, ..., {tail}'.format(head=head[:-1], tail=tail[1:])
if footer:
result = '{result}\n{footer}'.format(
result=result, footer=self._repr_footer())
return str(result) | [
" a short repr displaying only max_vals and an optional (but default)\n footer\n "
] |