from __future__ import division
import math
# Computing the full set of metrics requires several "big" packages, but we
# still want the basic GR computation (which only uses the stdlib) to be
# available regardless.
try:
import pandas as pd
import numpy as np
import scipy.optimize, scipy.stats
_packages_available = True
except ImportError:
_packages_available = False
__all__ = ['compute_gr', 'compute_gr_single', 'gr_metrics', 'logistic']
def _normalize_log2(n, n_0_0):
normalized = max(n / n_0_0, 1e-6) # avoiding negative and null values
return math.log(normalized, 2)
def compute_gr_single(record):
"""Compute Growth Response value for a single sample.
The input is a namedtuple or pandas Series with at least the following
numeric fields:
* cell_count: Number of cells detected in this sample.
* cell_count__time0: Number of cells in the treatment_duration=0 control for this sample.
* cell_count__ctrl: Number of cells in the no-perturbation control.
Parameters
----------
record : Union[namedtuple, pandas.Series]
Input data on which to compute the GR value.
Returns
-------
float
The computed GR value.
Example
-------
>>> from collections import namedtuple
>>> Record = namedtuple('Record',
... ['cell_count', 'cell_count__ctrl', 'cell_count__time0'])
>>> rec = Record(cell_count=1710, cell_count__ctrl=1766.0,
... cell_count__time0=492.8)
>>> print compute_gr_single(rec)
0.965305500206
"""
cc_t0 = float(record.cell_count__time0)
log2nn = _normalize_log2(float(record.cell_count), cc_t0)
log2nn_ctrl = _normalize_log2(float(record.cell_count__ctrl), cc_t0)
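# GR = 2 ** (log2(x / x0) / log2(x_ctrl / x0)) - 1: treated growth relative
# to the untreated control, with both normalized to the time-0 cell count.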
gr = 2 ** (log2nn / log2nn_ctrl) - 1
return gr
def assign_ctrls(data, keys):
'''Compute and assign the controls based on keys
The input dataframe must contain the column cell_count
The input keys is a list of column names that will be used to assign controls
The column 'treatment_duration' is always used as an additional, default key and must be present.
Matching of conditions is based on the labels in the column 'role':
conditions labeled 'treatment' are normalized to the matching 'negative_control' conditions.
Parameters
----------
data : pandas.DataFrame
Input data on which to assign the control values.
keys : list of str
Column names used to match treated conditions to their controls.
Returns
-------
pandas.DataFrame
Deep copy of input data with columns 'cell_count__ctrl' and
'cell_count__time0' appended.
Example
-------
df_out = assign_ctrls(df_in, ['cell_line', 'serum_pct'])
'''
# remove 'treatment_duration' from the keys; it is always applied separately as a default key
keys = list(set(keys) - set(['treatment_duration']))
df_keys = data[keys].drop_duplicates()
df_keys.reset_index(inplace=True)
dfout = pd.DataFrame(columns=data.columns).copy()
for i in range(len(df_keys)):
idx = np.ones(len(data))==1
for k in keys:
idx &= data[k] == df_keys[k][i]
x0 = np.mean(data.loc[idx & (data.treatment_duration==0), 'cell_count'].values)
times = data['treatment_duration'][idx & (data.treatment_duration>0)].drop_duplicates().values
for t in times:
idx_t = idx & (data.treatment_duration==t) & (data.role=='treatment')
x_ctrl = np.mean(data.loc[idx & (data.treatment_duration==t) & \
(data.role=='negative_control'), 'cell_count'].values)
df_ctrl = pd.DataFrame(np.repeat([[x0, x_ctrl]],sum(idx_t),axis=0),
columns=['cell_count__time0', 'cell_count__ctrl'])
dfout = dfout.append(pd.concat([data.loc[idx_t, :].reset_index(drop=True), df_ctrl],
axis=1),
ignore_index=True)
return dfout
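# A minimal, illustrative sketch of assign_ctrls (not part of the original
# module): the made-up rows below pair each 'treatment' condition with its
# matching 'negative_control' and treatment_duration == 0 baseline. It
# assumes pandas/numpy are installed and a pandas version that still
# provides DataFrame.append.
def _demo_assign_ctrls():
    df_in = pd.DataFrame({
        'cell_line': ['MCF7'] * 4,   # 'MCF7' is just an example key label
        'treatment_duration': [0, 72, 72, 72],
        'role': ['negative_control', 'negative_control',
                 'treatment', 'treatment'],
        'concentration': [0.0, 0.0, 0.1, 1.0],
        'cell_count': [500, 1750, 1200, 900],
    })
    return assign_ctrls(df_in, ['cell_line'])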
def compute_gr(data):
"""Compute Growth Response value for an entire dataset.
The input dataframe must contain at least the following numeric fields:
* cell_count: Number of cells detected per sample.
* cell_count__time0: Number of cells in the treatment_duration=0 control for each sample.
* cell_count__ctrl: Number of cells in the no-perturbation control.
The input must not already contain a column named 'GRvalue'.
A new dataframe will be returned with the GR values stored in a new
'GRvalue' column.
Parameters
----------
data : pandas.DataFrame
Input data on which to compute the metrics.
Returns
-------
pandas.DataFrame
Shallow copy of input data with a 'GRvalue' column appended.
Example
-------
"""
if 'GRvalue' in data:
raise ValueError("Data already contains a 'GRvalue' column; aborting")
result = data.copy(deep=False)
result['GRvalue'] = data.apply(compute_gr_single, axis=1)
return result
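# A minimal, illustrative sketch of compute_gr on a hand-built frame (not part
# of the original module; the numbers are made up and pandas must be
# installed). Each row carries the three cell-count columns described above.
def _demo_compute_gr():
    df_in = pd.DataFrame({
        'cell_count': [1710.0, 1100.0],
        'cell_count__ctrl': [1766.0, 1766.0],
        'cell_count__time0': [492.8, 492.8],
    })
    df_out = compute_gr(df_in)
    return df_out['GRvalue']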
def logistic(x, params):
"""Evaluate logistic curve equation using log-transformed x_0.
Parameters
----------
x
X-value at which to evaluate the logistic function.
params : list
* einf: maximum Y-value (effect)
* log10_mid: log10-transformed X-value of midpoint
* slope: Steepness of curve
Returns
-------
float
Y-value of logistic function.
"""
einf, log10_mid, slope = params
emin = 1.0
mid = 10 ** log10_mid
return ( (emin-einf) / (1 + ((x/mid)**slope) ) ) + einf
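# Quick sanity check on the parameterization (an illustrative sketch, not part
# of the original module): at x == 10 ** log10_mid the response is exactly
# halfway between emin (1.0) and einf, which is why the fitted midpoint is
# reported as GEC50 by gr_metrics below.
def _demo_logistic_midpoint():
    params = [0.1, 0.0, 2.0]      # einf = 0.1, midpoint at 10 ** 0 == 1.0
    return logistic(1.0, params)  # (1.0 + 0.1) / 2 == 0.55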
def _logistic_inv(y, params):
einf, log10_mid, slope = params
emin = 1.0
mid = 10 ** log10_mid
if y >= min(emin, einf) and y <= max(emin, einf):
return mid * ( (y-emin) / (einf-y) ) ** (1/slope)
else:
return np.inf
def _flat(x, params):
y, = params
return y
def _rss(params, fn, xdata, ydata):
rss = 0.0
for x, y in zip(xdata, ydata):
rss += (y - fn(x, params)) ** 2
return rss
def _tss(ydata):
tss = 0.0
y_mean = ydata.mean()
for y in ydata:
tss += (y - y_mean) ** 2
return tss
def _rsquare(params, fn, xdata, ydata):
ss_res = _rss(params, fn, xdata, ydata)
ss_tot = _tss(ydata)
return 1 - ss_res / ss_tot
def _fit(fn, xdata, ydata, prior, bounds):
res = scipy.optimize.minimize(_rss, args=(fn, xdata, ydata),
x0=prior, bounds=bounds)
return res
def _calculate_pval(logistic_result, flat_result, n):
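# Nested-model F-test: the one-parameter flat fit is the null model and the
# three-parameter logistic fit is the alternative; a small p-value means the
# logistic curve fits the data significantly better than a horizontal line.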
rss2 = logistic_result.fun
rss1 = flat_result.fun
df1 = len(logistic_result.x) - len(flat_result.x)
df2 = n - len(logistic_result.x) + 1
f = ( (rss1-rss2)/df1 ) / (rss2/df2)
pval = 1 - scipy.stats.f.cdf(f, df1, df2)
return pval
def _mklist(values):
"""Convert tuple to list, and anything else to a list with just that thing.
This is a helper to fix an inconsistency with the group keys in a
pandas.groupby object. When grouping by multiple columns, the keys are
tuples of values. When grouping by a single column, even if specified as a
single-element list, the keys are single values. We always want a list,
which is what this function accomplishes."""
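# e.g. a groupby over ['drug', 'concentration'] yields keys like ('A', 1.0)
# -> ['A', 1.0], while a groupby over ['drug'] alone yields 'A' -> ['A'].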
if isinstance(values, tuple):
return list(values)
else:
return [values]
def _metrics(df, alpha):
conc_min = df.concentration.min() / 100
conc_max = df.concentration.max() * 100
bounds = np.array([[-1, 1], np.log10([conc_min, conc_max]), [0.1, 5]])
prior = np.array([0.1, np.log10(np.median(df.concentration)), 2])
logistic_result = _fit(logistic, df.concentration, df.GRvalue,
prior, bounds)
flat_result = _fit(_flat, df.concentration, df.GRvalue,
prior[[0]], bounds[[0]])
pval = _calculate_pval(logistic_result, flat_result, len(df.concentration))
if pval > alpha or not logistic_result.success:
# Return values for the metrics such that the logistic function will
# produce the same curve as the flat fit.
if flat_result.x[0] > 0.5:
gr50 = np.inf
else:
gr50 = -np.inf
inf = flat_result.x[0]
gec50 = 0.0
# Must be non-zero or the logistic function will error.
slope = 0.01
r2 = _rsquare(flat_result.x, _flat, df.concentration, df.GRvalue)
else:
gr50 = _logistic_inv(0.5, logistic_result.x)
inf = logistic_result.x[0]
gec50 = 10 ** logistic_result.x[1]
slope = logistic_result.x[2]
r2 = _rsquare(logistic_result.x, logistic, df.concentration, df.GRvalue)
# Take the minimum across the highest 2 doses to minimize the effect of
# outliers (robust minimum).
max_ = min(df.GRvalue[-2:])
log_conc = np.log10(df.concentration)
# Normalize AOC by concentration range (width of curve).
aoc_width = log_conc.max() - log_conc.min()
aoc = np.trapz(1 - df.GRvalue, log_conc) / aoc_width
return [gr50, max_, aoc, gec50, inf, slope, r2, pval]
def gr_metrics(data, alpha=0.05):
"""Compute Growth Response metrics for an entire dataset.
The input dataframe must contain a column named 'concentration' with the
dose values of the perturbing agent and a column named 'GRvalue' with the
corresponding growth response (GR) values. Columns named 'cell_count',
'cell_count__ctrl' and 'cell_count__time0', which are used by the compute_gr
function, will be ignored if they are still present in your dataframe.
Multiple dose-response experiments may be combined into a single dataframe
by adding extra 'key' columns to distinguish them from each other. Each
unique combination of values in the key columns will produce one
corresponding row in the returned dataframe. The columns are the keys
(if present) and metric names.
The computed metrics are:
* GR50: Dose at which GR reaches 0.5.
* GRmax: Maximum observed GR effect (minimum value).
* GR_AOC: Area over the curve of observed data points. Mathematically this
is calculated as 1-GR so that increasing GR_AOC values correspond to
increasing agent effect. Also note the x-axis (concentration) values are
log10-transformed, and the entire area is normalized by the width
(difference between maximum and minimum concentration).
* GEC50: Dose at which GR is halfway between 1 and GRinf.
* GRinf: Extrapolated GR value at infinite dose.
* h_GR: Hill slope of fitted GR logistic curve.
* r2_GR: R squared of fitted GR logistic curve.
* pval_GR: P-value from the F-test for the GR logistic curve (see below).
The input data for each experiment are fitted with a logistic curve. An
F-test is then performed with the null hypothesis being that there is no
dose response, i.e. the data can be fitted well with a straight horizontal
line. If the null hypothesis is not rejected, the returned metrics are
chosen such that the logistic curve determined by GRinf, GEC50 and h_GR is
equivalent to the horizontal line fit, and GR50 is infinite (potentially
positive *or* negative).
Parameters
----------
data : pandas.DataFrame
Input data on which to compute the metrics.
alpha : Optional[float]
Significance level for the F-test.
Returns
-------
pandas.DataFrame
The computed metrics.
Example
-------
>>> import pandas
>>> data = pandas.DataFrame(
... [['A', 0.001, 0.965], ['A', 0.01, 0.953], ['A', 0.1, 0.533],
... ['A', 1.0, 0.0976], ['A', 10.0, 0.0188],
... ['B', 0.001, 0.985], ['B', 0.01, 0.916], ['B', 0.1, 0.978],
... ['B', 1.0, 1.04], ['B', 10.0, 0.936]],
... columns=['drug', 'concentration', 'GRvalue'])
>>> print gr_metrics(data)
drug GR50 GRmax GR_AOC GEC50 GRinf h_GR \\
0 A 0.114025 0.0188 0.481125 0.110411 0.018109 1.145262
1 B inf 0.9360 0.026375 0.000000 0.971000 0.010000
<BLANKLINE>
r2_GR pval_GR
0 9.985790e-01 0.000054
1 -1.243450e-14 1.000000
"""
if not _packages_available:
raise RuntimeError("Please install numpy, scipy and pandas in order "
"to use this function")
non_keys = set(('concentration', 'cell_count', 'cell_count__ctrl',
'cell_count__time0', 'GRvalue'))
metric_columns = ['GR50', 'GRmax', 'GR_AOC', 'GEC50', 'GRinf', 'h_GR', 'r2_GR',
'pval_GR']
keys = list(set(data.columns) - non_keys)
gb = data.groupby(keys)
data = [_mklist(k) + _metrics(v, alpha) for k, v in gb]
df = pd.DataFrame(data, columns=keys + metric_columns)
return df
# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
import pandas.util.testing as tm
from pandas import DatetimeIndex, MultiIndex
from pandas._libs import hashtable
from pandas.compat import range, u
@pytest.mark.parametrize('names', [None, ['first', 'second']])
def test_unique(names):
mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('abab')],
names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names)
tm.assert_index_equal(res, exp)
mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names)
res = mi.unique()
exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names)
tm.assert_index_equal(res, exp)
# GH #20568 - empty MI
mi = MultiIndex.from_arrays([[], []], names=names)
res = mi.unique()
tm.assert_index_equal(mi, res)
def test_unique_datetimelike():
idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01',
'2015-01-01', 'NaT', 'NaT'])
idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02',
'2015-01-02', 'NaT', '2015-01-01'],
tz='Asia/Tokyo')
result = MultiIndex.from_arrays([idx1, idx2]).unique()
eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT'])
eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02',
'NaT', '2015-01-01'],
tz='Asia/Tokyo')
exp = MultiIndex.from_arrays([eidx1, eidx2])
tm.assert_index_equal(result, exp)
@pytest.mark.parametrize('level', [0, 'first', 1, 'second'])
def test_unique_level(idx, level):
# GH #17896 - with level= argument
result = idx.unique(level=level)
expected = idx.get_level_values(level).unique()
tm.assert_index_equal(result, expected)
# With already unique level
mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]],
names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
# With empty MI
mi = MultiIndex.from_arrays([[], []], names=['first', 'second'])
result = mi.unique(level=level)
expected = mi.get_level_values(level)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('dropna', [True, False])
def test_get_unique_index(idx, dropna):
mi = idx[[0, 1, 0, 1, 1, 0, 0]]
expected = mi._shallow_copy(mi[[0, 1]])
result = mi._get_unique_index(dropna=dropna)
assert result.unique
tm.assert_index_equal(result, expected)
def test_duplicate_multiindex_labels():
# GH 17464
# Make sure that a MultiIndex with duplicate levels throws a ValueError
with pytest.raises(ValueError):
mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)])
# And that using set_levels with duplicate levels fails
mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'],
[1, 2, 1, 2, 3]])
with pytest.raises(ValueError):
mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]],
inplace=True)
@pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2],
[1, 'a', 1]])
def test_duplicate_level_names(names):
# GH18872, GH19029
mi = MultiIndex.from_product([[0, 1]] * 3, names=names)
assert mi.names == names
# With .rename()
mi = MultiIndex.from_product([[0, 1]] * 3)
mi = mi.rename(names)
assert mi.names == names
# With .rename(., level=)
mi.rename(names[1], level=1, inplace=True)
mi = mi.rename([names[0], names[2]], level=[0, 2])
assert mi.names == names
def test_duplicate_meta_data():
# GH 10115
mi = MultiIndex(
levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
for idx in [mi,
mi.set_names([None, None]),
mi.set_names([None, 'Num']),
mi.set_names(['Upper', 'Num']), ]:
assert idx.has_duplicates
assert idx.drop_duplicates().names == idx.names
def test_has_duplicates(idx, idx_dup):
# see fixtures
assert idx.is_unique is True
assert idx.has_duplicates is False
assert idx_dup.is_unique is False
assert idx_dup.has_duplicates is True
mi = MultiIndex(levels=[[0, 1], [0, 1, 2]],
labels=[[0, 0, 0, 0, 1, 1, 1],
[0, 1, 2, 0, 0, 1, 2]])
assert mi.is_unique is False
assert mi.has_duplicates is True
def test_has_duplicates_from_tuples():
# GH 9075
t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169),
(u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119),
(u('x'), u('out'), u('z'), 9, u('y'), u('in'), u('z'), 135),
(u('x'), u('out'), u('z'), 13, u('y'), u('in'), u('z'), 145),
(u('x'), u('out'), u('z'), 14, u('y'), u('in'), u('z'), 158),
(u('x'), u('out'), u('z'), 16, u('y'), u('in'), u('z'), 122),
(u('x'), u('out'), u('z'), 17, u('y'), u('in'), u('z'), 160),
(u('x'), u('out'), u('z'), 18, u('y'), u('in'), u('z'), 180),
(u('x'), u('out'), u('z'), 20, u('y'), u('in'), u('z'), 143),
(u('x'), u('out'), u('z'), 21, u('y'), u('in'), u('z'), 128),
(u('x'), u('out')
# -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
# from dotenv import find_dotenv, load_dotenv
import requests
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import datetime
import yfinance as yf
from pandas_datareader import data as pdr
from flask import current_app
from stk_predictor.extensions import db
@click.command()
@click.argument('input_filepath', type=click.Path(exists=True))
@click.argument('output_filepath', type=click.Path())
def main(input_filepath, output_filepath):
""" Runs data processing scripts to turn raw data from (../raw) into
cleaned data ready to be analyzed (saved in ../processed).
"""
logger = logging.getLogger(__name__)
logger.info('making final data set from raw data')
def get_ticker_from_yahoo(ticker, start_date, end_date):
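"""Fetch daily trading data for ``ticker`` between start_date and end_date.
Summary of the logic below: download daily bars through pandas_datareader
with the yfinance override, keep only the close price and volume, merge them
with the rows already stored in the 'aapl' table, drop duplicate trading
dates, sort by date and write the combined frame back to the database.
Returns the combined dataframe, or None if nothing was retrieved.
"""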
yf.pdr_override()
try:
new_trading_df = pdr.get_data_yahoo(
ticker, start_date, end_date, interval='1d')
new_trading_df = new_trading_df.drop(
['Open', 'High', 'Low', 'Adj Close'], axis=1)
new_trading_df = new_trading_df.dropna('index')
new_trading_df = new_trading_df.reset_index()
new_trading_df.columns = ['trading_date',
'intraday_close', 'intraday_volumes']
his_trading_df = pd.read_sql('aapl', db.engine, index_col='id')
df = pd.concat([his_trading_df, new_trading_df]
).drop_duplicates('trading_date')
df = df.sort_values(by='trading_date')
df = df.reset_index(drop=True)
if len(df) > 0:
df.to_sql("aapl", db.engine, if_exists='replace', index_label='id')
return df
else:
# t = pd.read_sql('aapl', db.engine, index_col='id')
return None
except Exception as ex:
raise RuntimeError(
"Catch Excetion when retrieve data from Yahoo...", ex)
return None
def get_news_from_finviz(ticker):
"""Request news headline from finviz, according to
company ticker's name
Parameters
-----------
ticker: str
the stock ticker name
Return
----------
df : pd.DataFrame
return the latest two days of news headlines.
"""
current_app.logger.info("Job >> Enter Finviz news scrape step...")
base_url = 'https://finviz.com/quote.ashx?t={}'.format(ticker)
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/50.0.2661.102 Safari/537.36'
}
parsed_news = []
try:
res = requests.get(base_url, headers=headers)
if res.status_code == 200:
texts = res.text
soup = BeautifulSoup(texts, "html.parser")
news_tables = soup.find(id="news-table")
for x in news_tables.findAll('tr'):
text = x.a.get_text()
date_scrape = x.td.text.split()
if len(date_scrape) == 1:
time = date_scrape[0]
else:
date = date_scrape[0]
time = date_scrape[1]
parsed_news.append([date, time, text])
# filter the recent day news
df = pd.DataFrame(parsed_news, columns=['date', 'time', 'texts'])
df['date'] = pd.to_datetime(df.date).dt.date
one_day_period = (datetime.datetime.today() -
datetime.timedelta(days=1)).date()
df_sub = df[df.date >= one_day_period]
return df_sub
else:
raise RuntimeError("HTTP response Error {}".format(
res.status_code)) from None
except Exception as ex:
current_app.logger.info("Exception in scrape Finviz.", ex)
raise RuntimeError("Exception in scrape Finviz.") from ex
def prepare_trading_dataset(df):
"""Prepare the trading data set.
Time series analysis incorporates previous data for future prediction,
We need to retrieve historical data to generate features.
Parameters
-----------
df: DataFrame
the stock ticker trading data, including trading-date, close-price, volumes
Note: feature engineering uses a window of at most 400 past trading days
to construct the features.
Return
----------
array_lstm : np.array
return the array with 3 dimensions shape -> [samples, 1, features]
"""
if len(df) == 0:
raise RuntimeError(
"Encounter Error in >>make_dataset.prepare_trading_dataset<<... \
Did not catch any news.") from None
else:
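# One trading week is treated as 5 trading days, so the N-week log return
# below is the rolling sum of N * 5 daily log returns (windows 5, 10, ..., 100).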
df['log_ret_1d'] = np.log(df['intraday_close'] / df['intraday_close'].shift(1))
df['log_ret_1w'] = pd.Series(df['log_ret_1d']).rolling(window=5).sum()
df['log_ret_2w'] = pd.Series(df['log_ret_1d']).rolling(window=10).sum()
df['log_ret_3w'] = pd.Series(df['log_ret_1d']).rolling(window=15).sum()
df['log_ret_4w'] = pd.Series(df['log_ret_1d']).rolling(window=20).sum()
df['log_ret_8w'] = pd.Series(df['log_ret_1d']).rolling(window=40).sum()
df['log_ret_12w'] = pd.Series(df['log_ret_1d']).rolling(window=60).sum()
df['log_ret_16w'] = pd.Series(df['log_ret_1d']).rolling(window=80).sum()
df['log_ret_20w'] = pd.Series(df['log_ret_1d']).rolling(window=100).sum()
from datetime import datetime, timedelta
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas.util.testing as tm
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestDataFrameAlterAxes:
def test_set_index_directly(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df.index = idx
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.index = idx[::2]
def test_set_index(self, float_string_frame):
df = float_string_frame
idx = Index(np.arange(len(df))[::-1])
df = df.set_index(idx)
tm.assert_index_equal(df.index, idx)
with pytest.raises(ValueError, match="Length mismatch"):
df.set_index(idx[::2])
def test_set_index_cast(self):
# issue casting an index then set_index
df = DataFrame(
{"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]
)
df2 = df.set_index(df.index.astype(np.int32))
tm.assert_frame_equal(df, df2)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop_inplace(self, frame_of_index_cols, drop, inplace, keys):
df = frame_of_index_cols
if isinstance(keys, list):
idx = MultiIndex.from_arrays([df[x] for x in keys], names=keys)
else:
idx = Index(df[keys], name=keys)
expected = df.drop(keys, axis=1) if drop else df
expected.index = idx
if inplace:
result = df.copy()
result.set_index(keys, drop=drop, inplace=True)
else:
result = df.set_index(keys, drop=drop)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append(self, frame_of_index_cols, drop, keys):
df = frame_of_index_cols
keys = keys if isinstance(keys, list) else [keys]
idx = MultiIndex.from_arrays(
[df.index] + [df[x] for x in keys], names=[None] + keys
)
expected = df.drop(keys, axis=1) if drop else df.copy()
expected.index = idx
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
# A has duplicate values, C does not
@pytest.mark.parametrize("keys", ["A", "C", ["A", "B"], ("tuple", "as", "label")])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_append_to_multiindex(self, frame_of_index_cols, drop, keys):
# append to existing multiindex
df = frame_of_index_cols.set_index(["D"], drop=drop, append=True)
keys = keys if isinstance(keys, list) else [keys]
expected = frame_of_index_cols.set_index(["D"] + keys, drop=drop, append=True)
result = df.set_index(keys, drop=drop, append=True)
tm.assert_frame_equal(result, expected)
def test_set_index_after_mutation(self):
# GH1590
df = DataFrame({"val": [0, 1, 2], "key": ["<KEY>"]})
expected = DataFrame({"val": [1, 2]}, Index(["b", "c"], name="key"))
df2 = df.loc[df.index.map(lambda indx: indx >= 1)]
result = df2.set_index("key")
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# Add list-of-list constructor because list is ambiguous -> lambda
# also test index name if append=True (name is duplicate here for B)
@pytest.mark.parametrize(
"box",
[
Series,
Index,
np.array,
list,
lambda x: [list(x)],
lambda x: MultiIndex.from_arrays([x]),
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "B"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_single_array(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
key = box(df["B"])
if box == list:
# list of strings gets interpreted as list of keys
msg = "['one', 'two', 'three', 'one', 'two']"
with pytest.raises(KeyError, match=msg):
df.set_index(key, drop=drop, append=append)
else:
# np.array/list-of-list "forget" the name of B
name_mi = getattr(key, "names", None)
name = [getattr(key, "name", None)] if name_mi is None else name_mi
result = df.set_index(key, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, nothing is dropped
expected = df.set_index(["B"], drop=False, append=append)
expected.index.names = [index_name] + name if append else name
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# also test index name if append=True (name is duplicate here for A & B)
@pytest.mark.parametrize(
"box", [Series, Index, np.array, list, lambda x: MultiIndex.from_arrays([x])]
)
@pytest.mark.parametrize(
"append, index_name",
[(True, None), (True, "A"), (True, "B"), (True, "test"), (False, None)],
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays(
self, frame_of_index_cols, drop, append, index_name, box
):
df = frame_of_index_cols
df.index.name = index_name
keys = ["A", box(df["B"])]
# np.array/list "forget" the name of B
names = ["A", None if box in [np.array, list, tuple, iter] else "B"]
result = df.set_index(keys, drop=drop, append=append)
# only valid column keys are dropped
# since B is always passed as array above, only A is dropped, if at all
expected = df.set_index(["A", "B"], drop=False, append=append)
expected = expected.drop("A", axis=1) if drop else expected
expected.index.names = [index_name] + names if append else names
tm.assert_frame_equal(result, expected)
# MultiIndex constructor does not work directly on Series -> lambda
# We also emulate a "constructor" for the label -> lambda
# also test index name if append=True (name is duplicate here for A)
@pytest.mark.parametrize(
"box2",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"box1",
[
Series,
Index,
np.array,
list,
iter,
lambda x: MultiIndex.from_arrays([x]),
lambda x: x.name,
],
)
@pytest.mark.parametrize(
"append, index_name", [(True, None), (True, "A"), (True, "test"), (False, None)]
)
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_arrays_duplicate(
self, frame_of_index_cols, drop, append, index_name, box1, box2
):
df = frame_of_index_cols
df.index.name = index_name
keys = [box1(df["A"]), box2(df["A"])]
result = df.set_index(keys, drop=drop, append=append)
# if either box is iter, it has been consumed; re-read
keys = [box1(df["A"]), box2(df["A"])]
# need to adapt first drop for case that both keys are 'A' --
# cannot drop the same column twice;
# use "is" because == would give ambiguous Boolean error for containers
first_drop = (
False if (keys[0] is "A" and keys[1] is "A") else drop # noqa: F632
)
# to test against already-tested behaviour, we add sequentially,
# hence second append always True; must wrap keys in list, otherwise
# box = list would be interpreted as keys
expected = df.set_index([keys[0]], drop=first_drop, append=append)
expected = expected.set_index([keys[1]], drop=drop, append=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_pass_multiindex(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
keys = MultiIndex.from_arrays([df["A"], df["B"]], names=["A", "B"])
result = df.set_index(keys, drop=drop, append=append)
# setting with a MultiIndex will never drop columns
expected = df.set_index(["A", "B"], drop=False, append=append)
tm.assert_frame_equal(result, expected)
def test_set_index_verify_integrity(self, frame_of_index_cols):
df = frame_of_index_cols
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index("A", verify_integrity=True)
# with MultiIndex
with pytest.raises(ValueError, match="Index has duplicate keys"):
df.set_index([df["A"], df["A"]], verify_integrity=True)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_keys(self, frame_of_index_cols, drop, append):
df = frame_of_index_cols
with pytest.raises(KeyError, match="['foo', 'bar', 'baz']"):
# column names are A-E, as well as one tuple
df.set_index(["foo", "bar", "baz"], drop=drop, append=append)
# non-existent key in list with arrays
with pytest.raises(KeyError, match="X"):
df.set_index([df["A"], df["B"], "X"], drop=drop, append=append)
msg = "[('foo', 'foo', 'foo', 'bar', 'bar')]"
# tuples always raise KeyError
with pytest.raises(KeyError, match=msg):
df.set_index(tuple(df["A"]), drop=drop, append=append)
# also within a list
with pytest.raises(KeyError, match=msg):
df.set_index(["A", df["A"], tuple(df["A"])], drop=drop, append=append)
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
@pytest.mark.parametrize("box", [set], ids=["set"])
def test_set_index_raise_on_type(self, frame_of_index_cols, box, drop, append):
df = frame_of_index_cols
msg = 'The parameter "keys" may be a column key, .*'
# forbidden type, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(box(df["A"]), drop=drop, append=append)
# forbidden type in list, e.g. set
with pytest.raises(TypeError, match=msg):
df.set_index(["A", df["A"], box(df["A"])], drop=drop, append=append)
# MultiIndex constructor does not work directly on Series -> lambda
@pytest.mark.parametrize(
"box",
[Series, Index, np.array, iter, lambda x: MultiIndex.from_arrays([x])],
ids=["Series", "Index", "np.array", "iter", "MultiIndex"],
)
@pytest.mark.parametrize("length", [4, 6], ids=["too_short", "too_long"])
@pytest.mark.parametrize("append", [True, False])
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_raise_on_len(
self, frame_of_index_cols, box, length, drop, append
):
# GH 24984
df = frame_of_index_cols # has length 5
values = np.random.randint(0, 10, (length,))
msg = "Length mismatch: Expected 5 rows, received array of length.*"
# wrong length directly
with pytest.raises(ValueError, match=msg):
df.set_index(box(values), drop=drop, append=append)
# wrong length in list
with pytest.raises(ValueError, match=msg):
df.set_index(["A", df.A, box(values)], drop=drop, append=append)
def test_set_index_custom_label_type(self):
# GH 24969
class Thing:
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
# necessary for pretty KeyError
__repr__ = __str__
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing("Three", "pink")
msg = "<Thing 'Three'>"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_hashable_iterable(self):
# GH 24969
# actual example discussed in GH 24984 was e.g. for shapely.geometry
# objects (e.g. a collection of Points) that can be both hashable and
# iterable; using frozenset as a stand-in for testing here
class Thing(frozenset):
# need to stabilize repr for KeyError (due to random order in sets)
def __repr__(self):
tmp = sorted(list(self))
# double curly brace prints one brace in format string
return "frozenset({{{}}})".format(", ".join(map(repr, tmp)))
thing1 = Thing(["One", "red"])
thing2 = Thing(["Two", "blue"])
df = DataFrame({thing1: [0, 1], thing2: [2, 3]})
expected = DataFrame({thing1: [0, 1]}, index=Index([2, 3], name=thing2))
# use custom label directly
result = df.set_index(thing2)
tm.assert_frame_equal(result, expected)
# custom label wrapped in list
result = df.set_index([thing2])
tm.assert_frame_equal(result, expected)
# missing key
thing3 = Thing(["Three", "pink"])
msg = r"frozenset\(\{'Three', 'pink'\}\)"
with pytest.raises(KeyError, match=msg):
# missing label directly
df.set_index(thing3)
with pytest.raises(KeyError, match=msg):
# missing label in list
df.set_index([thing3])
def test_set_index_custom_label_type_raises(self):
# GH 24969
# purposefully inherit from something unhashable
class Thing(set):
def __init__(self, name, color):
self.name = name
self.color = color
def __str__(self):
return "<Thing {self.name!r}>".format(self=self)
thing1 = Thing("One", "red")
thing2 = Thing("Two", "blue")
df = DataFrame([[0, 2], [1, 3]], columns=[thing1, thing2])
msg = 'The parameter "keys" may be a column key, .*'
with pytest.raises(TypeError, match=msg):
# use custom label directly
df.set_index(thing2)
with pytest.raises(TypeError, match=msg):
# custom label wrapped in list
df.set_index([thing2])
def test_construction_with_categorical_index(self):
ci = tm.makeCategoricalIndex(10)
ci.name = "B"
# with Categorical
df = DataFrame({"A": np.random.randn(10), "B": ci.values})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# from a CategoricalIndex
df = DataFrame({"A": np.random.randn(10), "B": ci})
idf = df.set_index("B")
tm.assert_index_equal(idf.index, ci)
# round-trip
idf = idf.reset_index().set_index("B")
tm.assert_index_equal(idf.index, ci)
def test_set_index_cast_datetimeindex(self):
df = DataFrame(
{
"A": [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)],
"B": np.random.randn(1000),
}
)
idf = df.set_index("A")
assert isinstance(idf.index, DatetimeIndex)
def test_convert_dti_to_series(self):
# don't cast a DatetimeIndex WITH a tz, leave as object
# GH 6032
idx = DatetimeIndex(
to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
).tz_localize("US/Pacific")
df = DataFrame(np.random.randn(2, 1), columns=["A"])
expected = Series(
np.array(
[
Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
],
dtype="object",
),
name="B",
)
# convert index to series
result = Series(idx)
tm.assert_series_equal(result, expected)
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
# convert to series while keeping the timezone
result = idx.to_series(keep_tz=True, index=[0, 1])
tm.assert_series_equal(result, expected)
# convert to utc
with tm.assert_produces_warning(FutureWarning):
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
result = df["B"]
comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
with tm.assert_produces_warning(FutureWarning) as m:
result = idx.to_series(index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
msg = (
"The default of the 'keep_tz' keyword in "
"DatetimeIndex.to_series will change to True in a future "
"release."
)
assert msg in str(m[0].message)
with tm.assert_produces_warning(FutureWarning):
result = idx.to_series(keep_tz=False, index=[0, 1])
tm.assert_series_equal(result, expected.dt.tz_convert(None))
# list of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
# GH 6785
# set the index manually
import pytz
df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
expected = df.set_index("ts")
df.index = df["ts"]
df.pop("ts")
tm.assert_frame_equal(df, expected)
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_set_index_timezone(self):
# GH 12358
# tz-aware Series should retain the tz
idx = to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert("Europe/Rome")
df = DataFrame({"A": idx})
assert df.set_index(idx).index[0].hour == 11
assert DatetimeIndex(Series(df.A))[0].hour == 11
assert df.set_index(df.A).index[0].hour == 11
def test_set_index_dst(self):
di = date_range("2006-10-29 00:00:00", periods=3, freq="H", tz="US/Pacific")
df = DataFrame(data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=di).reset_index()
# single level
res = df.set_index("index")
exp = DataFrame(
data={"a": [0, 1, 2], "b": [3, 4, 5]}, index=Index(di, name="index")
)
tm.assert_frame_equal(res, exp)
# GH 12920
res = df.set_index(["index", "a"])
exp_index = MultiIndex.from_arrays([di, [0, 1, 2]], names=["index", "a"])
exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
tm.assert_frame_equal(res, exp)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_set_index_multiindexcolumns(self):
columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
df = DataFrame(np.random.randn(3, 3), columns=columns)
result = df.set_index(df.columns[0])
expected = df.iloc[:, 1:]
expected.index = df.iloc[:, 0].values
expected.index.names = [df.columns[0]]
tm.assert_frame_equal(result, expected)
def test_set_index_empty_column(self):
# GH 1971
df = DataFrame(
[
{"a": 1, "p": 0},
{"a": 2, "m": 10},
{"a": 3, "m": 11, "p": 20},
{"a": 4, "m": 12, "p": 21},
],
columns=("a", "m", "p", "x"),
)
result = df.set_index(["a", "x"])
expected = df[["m", "p"]]
expected.index = MultiIndex.from_arrays([df["a"], df["x"]], names=["a", "x"])
tm.assert_frame_equal(result, expected)
def test_set_columns(self, float_string_frame):
cols = Index(np.arange(len(float_string_frame.columns)))
float_string_frame.columns = cols
with pytest.raises(ValueError, match="Length mismatch"):
float_string_frame.columns = cols[::2]
def test_dti_set_index_reindex(self):
# GH 6631
df = DataFrame(np.random.random(6))
idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")
df = df.set_index(idx1)
tm.assert_index_equal(df.index, idx1)
df = df.reindex(idx2)
tm.assert_index_equal(df.index, idx2)
# GH 11314
# with tz
index = date_range(
datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
)
df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
new_index = date_range(
datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
)
result = df.set_index(new_index)
assert result.index.freq == index.freq
# Renaming
def test_rename(self, float_frame):
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
renamed = float_frame.rename(columns=mapping)
renamed2 = float_frame.rename(columns=str.lower)
tm.assert_frame_equal(renamed, renamed2)
tm.assert_frame_equal(
renamed2.rename(columns=str.upper), float_frame, check_names=False
)
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetical
df = DataFrame(data)
renamed = df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
renamed = df.rename(index=str.upper)
tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
# have to pass something
with pytest.raises(TypeError, match="must pass an index to rename"):
float_frame.rename()
# partial columns
renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
# other axis
renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
# index with name
index = Index(["foo", "bar"], name="name")
renamer = DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
assert renamed.index.name == renamer.index.name
def test_rename_axis_inplace(self, float_frame):
# GH 15704
expected = float_frame.rename_axis("foo")
result = float_frame.copy()
no_return = result.rename_axis("foo", inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
expected = float_frame.rename_axis("bar", axis=1)
result = float_frame.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
assert no_return is None
tm.assert_frame_equal(result, expected)
def test_rename_axis_raises(self):
# https://github.com/pandas-dev/pandas/issues/17833
df = DataFrame({"A": [1, 2], "B": [1, 2]})
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis({0: 10, 1: 20}, axis=0)
with pytest.raises(ValueError, match="Use `.rename`"):
df.rename_axis(id, axis=1)
with pytest.raises(ValueError, match="Use `.rename`"):
df["A"].rename_axis(id)
def test_rename_axis_mapper(self):
# GH 19978
mi = MultiIndex.from_product([["a", "b", "c"], [1, 2]], names=["ll", "nn"])
df = DataFrame(
{"x": [i for i in range(len(mi))], "y": [i * 10 for i in range(len(mi))]},
index=mi,
)
# Test for rename of the Index object of columns
result = df.rename_axis("cols", axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="cols"))
# Test for rename of the Index object of columns using dict
result = result.rename_axis(columns={"cols": "new"}, axis=1)
tm.assert_index_equal(result.columns, Index(["x", "y"], name="new"))
# Test for renaming index using dict
result = df.rename_axis(index={"ll": "foo"})
assert result.index.names == ["foo", "nn"]
# Test for renaming index using a function
result = df.rename_axis(index=str.upper, axis=0)
assert result.index.names == ["LL", "NN"]
# Test for renaming index providing complete list
result = df.rename_axis(index=["foo", "goo"])
assert result.index.names == ["foo", "goo"]
# Test for changing index and columns at same time
sdf = df.reset_index().set_index("nn").drop(columns=["ll", "y"])
result = sdf.rename_axis(index="foo", columns="meh")
assert result.index.name == "foo"
assert result.columns.name == "meh"
# Test different error cases
with pytest.raises(TypeError, match="Must pass"):
df.rename_axis(index="wrong")
with pytest.raises(ValueError, match="Length of names"):
df.rename_axis(index=["wrong"])
with pytest.raises(TypeError, match="bogus"):
df.rename_axis(bogus=None)
@pytest.mark.parametrize(
"kwargs, rename_index, rename_columns",
[
({"mapper": None, "axis": 0}, True, False),
({"mapper": None, "axis": 1}, False, True),
({"index": None}, True, False),
({"columns": None}, False, True),
({"index": None, "columns": None}, True, True),
({}, False, False),
],
)
def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
# GH 25034
index = Index(list("abc"), name="foo")
columns = Index(["col1", "col2"], name="bar")
data = np.arange(6).reshape(3, 2)
df = DataFrame(data, index, columns)
result = df.rename_axis(**kwargs)
expected_index = index.rename(None) if rename_index else index
expected_columns = columns.rename(None) if rename_columns else columns
expected = DataFrame(data, expected_index, expected_columns)
tm.assert_frame_equal(result, expected)
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
)
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
tm.assert_index_equal(renamed.index, new_index)
tm.assert_index_equal(renamed.columns, new_columns)
assert renamed.index.names == df.index.names
assert renamed.columns.names == df.columns.names
#
# with specifying a level (GH13766)
# dict
new_columns = MultiIndex.from_tuples(
[("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# function
func = str.upper
new_columns = MultiIndex.from_tuples(
[("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, new_columns)
new_columns = MultiIndex.from_tuples(
[("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
)
renamed = df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, new_columns)
renamed = df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, new_columns)
# index
new_index = MultiIndex.from_tuples(
[("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
)
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(renamed.index, new_index)
def test_rename_nocopy(self, float_frame):
renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
renamed["foo"] = 1.0
assert (float_frame["C"] == 1.0).all()
def test_rename_inplace(self, float_frame):
float_frame.rename(columns={"C": "foo"})
assert "C" in float_frame
assert "foo" not in float_frame
c_id = id(float_frame["C"])
float_frame = float_frame.copy()
float_frame.rename(columns={"C": "foo"}, inplace=True)
assert "C" not in float_frame
assert "foo" in float_frame
assert id(float_frame["foo"]) != c_id
def test_rename_bug(self):
# GH 5344
# rename set ref_locs, and set_index was not resetting
df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
expected = DataFrame(
[[1], [2]],
index=MultiIndex.from_tuples(
[("foo", "bah"), ("bar", "bas")], names=["a", "b"]
),
columns=["2001-01-01"],
)
tm.assert_frame_equal(df, expected)
def test_rename_bug2(self):
# GH 19497
# rename was changing Index to MultiIndex if Index contained tuples
df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
df = df.rename({(1, 1): (5, 4)}, axis="index")
expected = DataFrame(
data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
)
tm.assert_frame_equal(df, expected)
def test_rename_errors_raises(self):
df = DataFrame(columns=["A", "B", "C", "D"])
with pytest.raises(KeyError, match="'E'] not found in axis"):
df.rename(columns={"A": "a", "E": "e"}, errors="raise")
@pytest.mark.parametrize(
"mapper, errors, expected_columns",
[
({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
({"A": "a"}, "raise", ["a", "B", "C", "D"]),
(str.lower, "raise", ["a", "b", "c", "d"]),
],
)
def test_rename_errors(self, mapper, errors, expected_columns):
# GH 13473
# rename now works with errors parameter
df = DataFrame(columns=["A", "B", "C", "D"])
result = df.rename(columns=mapper, errors=errors)
expected = DataFrame(columns=expected_columns)
tm.assert_frame_equal(result, expected)
def test_reorder_levels(self):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
# no change, position
result = df.reorder_levels([0, 1, 2])
tm.assert_frame_equal(df, result)
# no change, labels
result = df.reorder_levels(["L0", "L1", "L2"])
tm.assert_frame_equal(df, result)
# rotate, position
result = df.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
tm.assert_frame_equal(result, expected)
result = df.reorder_levels(["L0", "L0", "L0"])
tm.assert_frame_equal(result, expected)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({"foo": stacked, "bar": stacked})
names = ["first", "second"]
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, level_codes) in enumerate(
zip(stacked.index.levels, stacked.index.codes)
):
values = lev.take(level_codes)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(
deleveled["first"], deleveled2["level_0"], check_names=False
)
tm.assert_series_equal(
deleveled["second"], deleveled2["level_1"], check_names=False
)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name="index")
tm.assert_series_equal(rdf["index"], exp)
# default name assigned, corner case
df = float_frame.copy()
df["index"] = "foo"
rdf = df.reset_index()
exp = Series(float_frame.index.values, name="level_0")
tm.assert_series_equal(rdf["level_0"], exp)
# but this is ok
float_frame.index.name = "index"
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = "columns"
resetted = float_frame.reset_index()
assert resetted.columns.name == "columns"
# only remove certain columns
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index(["A", "B"])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index("A")
xp = float_frame.reset_index().set_index(["index", "B"])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index("A", drop=True)
xp = float_frame.copy()
del xp["A"]
xp = xp.set_index(["B"], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
index=Index(range(2), name="x"),
)
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
result = df.set_index(["A", "B"]).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C", "D"]])
# With single-level Index (GH 16263)
result = df.set_index("A").reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index("A").reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
tm.assert_frame_equal(result, df[["B", "C", "D"]])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ["A", "B"], ["A"]:
with pytest.raises(KeyError, match="Level E "):
df.set_index(idx_lev).reset_index(level=["A", "E"])
with pytest.raises(IndexError, match="Too many levels"):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series(
(9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
)
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted["time"].dtype == np.float64
resetted = df.reset_index()
assert resetted["time"].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ["x", "y", "z"]
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(
vals,
Index(idx, name="a"),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index()
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill="blah")
xp = DataFrame(
full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
df = DataFrame(
vals,
MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index("a")
xp = DataFrame(
full,
Index([0, 1, 2], name="d"),
columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill=None)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill="blah", col_level=1)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame(
{"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]})
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{
"A": ["a", "b", "c"],
"B": [np.nan, np.nan, np.nan],
"C": np.random.rand(3),
}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH5818
#
df = DataFrame(
[[1, 2], [3, 4]],
columns=date_range("1/1/2013", "1/2/2013"),
index=["A", "B"],
)
result = df.reset_index()
expected = DataFrame(
[["A", 1, 2], ["B", 3, 4]],
columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)],
)
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH 12071
df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame(
[[0, 0, 0], [1, 1, 1]],
columns=["index", "A", "B"],
index=RangeIndex(stop=2),
)
tm.assert_frame_equal(result, expected)
def test_set_index_names(self):
df = tm.makeDataFrame()
df.index.name = "name"
assert df.set_index(df.index).index.names == ["name"]
mi = MultiIndex.from_arrays(df[["A", "B"]].T.values, names=["A", "B"])
mi2 = MultiIndex.from_arrays(
df[["A", "B", "A", "B"]].T.values, names=["A", "B", "C", "D"]
)
df = df.set_index(["A", "B"])
assert df.set_index(df.index).index.names == ["A", "B"]
# Check that set_index isn't converting a MultiIndex into an Index
assert isinstance(df.set_index(df.index).index, MultiIndex)
# Check actual equality
tm.assert_index_equal(df.set_index(df.index).index, mi)
idx2 = df.index.rename(["C", "D"])
# Check that [MultiIndex, MultiIndex] yields a MultiIndex rather
# than a pair of tuples
assert isinstance(df.set_index([df.index, idx2]).index, MultiIndex)
# Check equality
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
def test_rename_objects(self, float_string_frame):
renamed = float_string_frame.rename(columns=str.upper)
assert "FOO" in renamed
assert "foo" not in renamed
def test_rename_axis_style(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"])
expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="columns")
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis=1)
tm.assert_frame_equal(result, expected)
result = df.rename({"A": "a", "B": "b"}, axis="columns")
tm.assert_frame_equal(result, expected)
# Index
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
result = df.rename(str.lower, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename(str.lower, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis=0)
tm.assert_frame_equal(result, expected)
result = df.rename({"X": "x", "Y": "y"}, axis="index")
tm.assert_frame_equal(result, expected)
result = df.rename(mapper=str.lower, axis="index")
tm.assert_frame_equal(result, expected)
def test_rename_mapper_multi(self):
df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index(
["A", "B"]
)
result = df.rename(str.upper)
expected = df.rename(index=str.upper)
tm.assert_frame_equal(result, expected)
def test_rename_positional_named(self):
# https://github.com/pandas-dev/pandas/issues/12392
df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
result = df.rename(str.lower, columns=str.upper)
expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
tm.assert_frame_equal(result, expected)
def test_rename_axis_style_raises(self):
# see gh-12392
df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
# Named target and axis
over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=1)
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(columns=str.lower, axis="columns")
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(index=str.lower, axis=0)
# Multiple targets and axis
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, axis="columns")
# Too many targets
over_spec_msg = "Cannot specify all of 'mapper', 'index', 'columns'."
with pytest.raises(TypeError, match=over_spec_msg):
df.rename(str.lower, str.lower, str.lower)
# Duplicates
with pytest.raises(TypeError, match="multiple values"):
df.rename(id, mapper=id)
def test_reindex_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
df = DataFrame(
[[1, 2, 3], [3, 4, 5], [5, 6, 7]],
index=["a", "b", "c"],
columns=["d", "e", "f"],
)
res1 = df.reindex(["b", "a"])
res2 = df.reindex(index=["b", "a"])
res3 = df.reindex(labels=["b", "a"])
res4 = df.reindex(labels=["b", "a"], axis=0)
res5 = df.reindex(["b", "a"], axis=0)
for res in [res2, res3, res4, res5]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(columns=["e", "d"])
res2 = df.reindex(["e", "d"], axis=1)
res3 = df.reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
res1 = df.reindex(index=["b", "a"], columns=["e", "d"])
res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
for res in [res2, res3]:
tm.assert_frame_equal(res1, res)
def test_rename_positional(self):
df = DataFrame(columns=["A", "B"])
with tm.assert_produces_warning(FutureWarning) as rec:
result = df.rename(None, str.lower)
expected = DataFrame(columns=["a", "b"])
tm.assert_frame_equal(result, expected)
assert len(rec) == 1
message = str(rec[0].message)
assert "rename" in message
assert "Use named arguments" in message
def test_assign_columns(self, float_frame):
float_frame["hi"] = "there"
df = float_frame.copy()
df.columns = ["foo", "bar", "baz", "quux", "foo2"]
tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False)
tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False)
def test_set_index_preserve_categorical_dtype(self):
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]:
result = df.set_index(cols).reset_index()
result = result.reindex(columns=df.columns)
tm.assert_frame_equal(result, df)
def test_ambiguous_warns(self):
df = DataFrame({"A": [1, 2]})
with tm.assert_produces_warning(FutureWarning):
df.rename(id, id)
with tm.assert_produces_warning(FutureWarning):
df.rename({0: 10}, {"A": "B"})
def test_rename_signature(self):
sig = inspect.signature(DataFrame.rename)
parameters = set(sig.parameters)
assert parameters == {
"self",
"mapper",
"index",
"columns",
"axis",
"inplace",
"copy",
"level",
"errors",
}
def test_reindex_signature(self):
sig = inspect.signature(DataFrame.reindex)
parameters = set(sig.parameters)
assert parameters == {
"self",
"labels",
"index",
"columns",
"axis",
"limit",
"copy",
"level",
"method",
"fill_value",
"tolerance",
}
def test_droplevel(self):
# GH20342
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
df = df.set_index([0, 1]).rename_axis(["a", "b"])
df.columns = MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
# test that dropping of a level in index works
expected = df.reset_index("a", drop=True)
result = df.droplevel("a", axis="index")
tm.assert_frame_equal(result, expected)
# test that dropping of a level in columns works
expected = df.copy()
expected.columns = Index(["c", "d"], name="level_1")
result = df.droplevel("level_2", axis="columns")
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
class TestIntervalIndex:
def test_setitem(self):
df = DataFrame({"A": range(10)})
s = cut(df.A, 5)
assert isinstance(s.cat.categories, IntervalIndex)
# B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
df["B"] = s
df["C"] = np.array(s)
df["D"] = s.values
df["E"] = np.array(s.values)
assert is_categorical_dtype(df["B"])
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"])
assert is_interval_dtype(df["D"].cat.categories)
        assert is_object_dtype(df["C"])
import threading
import numpy as np
import pandas as pd
import pytest
from pymilvus_orm import Index
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
prefix = "insert"
exp_name = "name"
exp_schema = "schema"
exp_num = "num_entities"
exp_primary = "primary"
default_schema = cf.gen_default_collection_schema()
default_binary_schema = cf.gen_default_binary_collection_schema()
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
default_binary_index_params = {"index_type": "BIN_IVF_FLAT", "metric_type": "JACCARD", "params": {"nlist": 64}}
class TestInsertParams(TestcaseBase):
""" Test case of Insert interface """
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_non_data_type(self, request):
if isinstance(request.param, list) or request.param is None:
pytest.skip("list and None type is valid data type")
yield request.param
@pytest.fixture(scope="module", params=ct.get_invalid_strs)
def get_invalid_field_name(self, request):
if isinstance(request.param, (list, dict)):
pytest.skip()
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dataframe_data(self):
"""
target: test insert DataFrame data
method: 1.create 2.insert dataframe data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
df = cf.gen_default_dataframe_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=df)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == df[ct.default_int64_field_name].values.tolist()
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_list_data(self):
"""
target: test insert list-like data
method: 1.create 2.insert list data
expected: assert num entities
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data(ct.default_nb)
mutation_res, _ = collection_w.insert(data=data)
assert mutation_res.insert_count == ct.default_nb
assert mutation_res.primary_keys == data[0]
assert collection_w.num_entities == ct.default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_non_data_type(self, get_non_data_type):
"""
target: test insert with non-dataframe, non-list data
method: insert with data (non-dataframe and non-list type)
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "Data type is not support"}
collection_w.insert(data=get_non_data_type, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.parametrize("data", [[], pd.DataFrame()])
def test_insert_empty_data(self, data):
"""
target: test insert empty data
method: insert empty
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
error = {ct.err_code: 0, ct.err_msg: "The data fields number is not match with schema"}
collection_w.insert(data=data, check_task=CheckTasks.err_res, check_items=error)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_dataframe_only_columns(self):
"""
target: test insert with dataframe just columns
method: dataframe just have columns
expected: num entities is zero
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
columns = [ct.default_int64_field_name, ct.default_float_vec_field_name]
        df = pd.DataFrame(columns=columns)
import re
from collections import OrderedDict
import numpy as np
import pandas as pd
from clickhouse_driver.client import Client as _DriverClient
from pkg_resources import parse_version
import ibis.common as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.schema as sch
import ibis.expr.types as ir
from ibis.clickhouse.compiler import ClickhouseDialect, build_ast
from ibis.client import Database, DatabaseEntity, Query, SQLClient
from ibis.config import options
from ibis.sql.compiler import DDL
from ibis.util import log
fully_qualified_re = re.compile(r"(.*)\.(?:`(.*)`|(.*))")
base_typename_re = re.compile(r"(\w+)")
_clickhouse_dtypes = {
'Null': dt.Null,
'Nothing': dt.Null,
'UInt8': dt.UInt8,
'UInt16': dt.UInt16,
'UInt32': dt.UInt32,
'UInt64': dt.UInt64,
'Int8': dt.Int8,
'Int16': dt.Int16,
'Int32': dt.Int32,
'Int64': dt.Int64,
'Float32': dt.Float32,
'Float64': dt.Float64,
'String': dt.String,
'FixedString': dt.String,
'Date': dt.Date,
'DateTime': dt.Timestamp,
}
_ibis_dtypes = {v: k for k, v in _clickhouse_dtypes.items()}
_ibis_dtypes[dt.String] = 'String'
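# Both 'String' and 'FixedString' map to dt.String in _clickhouse_dtypes, so the reverse
# lookup built above would otherwise be ambiguous; it is pinned explicitly to plain 'String'.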
class ClickhouseDataType:
__slots__ = 'typename', 'nullable'
def __init__(self, typename, nullable=False):
m = base_typename_re.match(typename)
base_typename = m.groups()[0]
if base_typename not in _clickhouse_dtypes:
raise com.UnsupportedBackendType(typename)
self.typename = base_typename
self.nullable = nullable
def __str__(self):
if self.nullable:
return 'Nullable({})'.format(self.typename)
else:
return self.typename
def __repr__(self):
return '<Clickhouse {}>'.format(str(self))
@classmethod
def parse(cls, spec):
# TODO(kszucs): spare parsing, depends on clickhouse-driver#22
if spec.startswith('Nullable'):
return cls(spec[9:-1], nullable=True)
else:
return cls(spec)
def to_ibis(self):
return _clickhouse_dtypes[self.typename](nullable=self.nullable)
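    # Illustrative round trip (a sketch, not part of the original module):
    #   ClickhouseDataType.parse('Nullable(Int64)').to_ibis()  # -> dt.Int64(nullable=True)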
@classmethod
def from_ibis(cls, dtype, nullable=None):
typename = _ibis_dtypes[type(dtype)]
if nullable is None:
nullable = dtype.nullable
return cls(typename, nullable=nullable)
@dt.dtype.register(ClickhouseDataType)
def clickhouse_to_ibis_dtype(clickhouse_dtype):
return clickhouse_dtype.to_ibis()
class ClickhouseDatabase(Database):
pass
class ClickhouseQuery(Query):
def _external_tables(self):
tables = []
for name, df in self.extra_options.get('external_tables', {}).items():
if not isinstance(df, pd.DataFrame):
raise TypeError(
'External table is not an instance of pandas ' 'dataframe'
)
schema = sch.infer(df)
chtypes = map(ClickhouseDataType.from_ibis, schema.types)
structure = list(zip(schema.names, map(str, chtypes)))
tables.append(
dict(
name=name, data=df.to_dict('records'), structure=structure
)
)
return tables
def execute(self):
cursor = self.client._execute(
self.compiled_sql, external_tables=self._external_tables()
)
result = self._fetch(cursor)
return self._wrap_result(result)
def _fetch(self, cursor):
data, colnames, _ = cursor
if not len(data):
# handle empty resultset
            return pd.DataFrame([], columns=colnames)
#!/usr/bin/env python
"""
Parses SPINS' EA log files into BIDS tsvs.
Usage:
dm_parse_ea.py [options] <study>
Arguments:
<study> A datman study to parse task data for.
Options:
--experiment <experiment> Single datman session to generate TSVs for
--timings <timing_path> The full path to the EA timings file.
Defaults to the 'EA-timing.csv' file in
the assets folder.
--lengths <lengths_path> The full path to the file containing the
EA vid lengths. Defaults to the
'EA-vid-lengths.csv' in the assets folder.
--regex <regex> The regex to use to find the log files to
parse. [default: *UCLAEmpAcc*]
--debug Set log level to debug
"""
import re
import os
import glob
import logging
import pandas as pd
import numpy as np
from docopt import docopt
import datman.config
import datman.scanid
logging.basicConfig(
level=logging.WARN, format="[%(name)s] %(levelname)s: %(message)s"
)
logger = logging.getLogger(os.path.basename(__file__))
# reads in log file and subtracts the initial TRs/MRI startup time
def read_in_logfile(path):
log_file = pd.read_csv(path, sep="\t", skiprows=3)
time_to_subtract = int(log_file.Duration[log_file.Code == "MRI_start"])
log_file.Time = log_file.Time - time_to_subtract
return log_file
# Remove the rating when there is a scanner response during the task instead of just at the start
def clean_logfile(log_file):
scan_response = ["101", "104"]
# 1st list of indexes to remove scan responses and ratings in the dataframe
indexes_to_drop = []
# Remove the rating that come after the scan response when there is a 102/103 response right before or after
# Also remove the rating that come after scan response and carry over to the next video
# The rating is always registered two indexes after the scan response
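    # Hypothetical illustration of the rule below: if the Code column around position i
    # reads ['101', '102', 'rating_4'] (a made-up slice), the scan response at i - 2 and
    # the rating at i are both queued for removal.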
for index, row in log_file.iterrows():
if ("rating" in log_file["Code"][index]) and any(
resp in log_file["Code"][index - 2] for resp in scan_response
):
# index to select the rating to drop
indexes_to_drop.append(index)
# index - 2 to select the scan response to drop
indexes_to_drop.append(index - 2)
if len(indexes_to_drop) == 0:
log_file_cleaned = log_file
else:
log_file_cleaned = log_file.drop(log_file.index[indexes_to_drop])
log_file_cleaned = log_file_cleaned.reset_index(drop=True)
logger.warning(
f"Removed {len(indexes_to_drop)/2} registered rating occurred before or after actual rating"
)
# 2nd list of indexes to drop the remaining scan responses and ratings
indexes_to_drop_1 = []
    # Remove the remaining rating responses that come right after a scan response
# The rating is registered one index after the scan response
for index, row in log_file_cleaned.iterrows():
if ("rating" in log_file_cleaned["Code"][index]) and any(
resp in log_file_cleaned["Code"][index - 1]
for resp in scan_response
):
# index to select the remaining rating to drop
indexes_to_drop_1.append(index)
            # index - 1 to select the remaining scan response to drop
indexes_to_drop_1.append(index - 1)
if len(indexes_to_drop_1) == 0:
final_log_file = log_file_cleaned
else:
final_log_file = log_file_cleaned.drop(
log_file_cleaned.index[indexes_to_drop_1]
)
final_log_file = final_log_file.reset_index(drop=True)
logger.warning(
f"Removed {len(indexes_to_drop_1)/2} rating registered followed scanner responses"
)
return final_log_file
# Grabs the starts of blocks and returns rows for them
def get_blocks(log, vid_info):
# identifies the video trial types (as opposed to button press events etc)
mask = ["vid" in log["Code"][i] for i in range(0, log.shape[0])]
df = pd.DataFrame(
{
"onset": log.loc[mask]["Time"],
"trial_type": log.loc[mask]["Event Type"],
"movie_name": log.loc[mask]["Code"],
}
)
df["trial_type"] = df["movie_name"].apply(
lambda x: "circle_block" if "cvid" in x else "EA_block"
)
df["duration"] = df["movie_name"].apply(
lambda x: int(vid_info[x]["duration"]) * 10000
if x in vid_info
else pd.NA
)
df["stim_file"] = df["movie_name"].apply(
lambda x: vid_info[x]["stim_file"] if x in vid_info else pd.NA
)
df["end"] = df["onset"] + df["duration"]
return df
def format_vid_info(vid):
vid.columns = [c.lower() for c in vid.columns]
vid = vid.rename(index={0: "stim_file", 1: "duration"})
vid = vid.to_dict()
return vid
def read_in_standard(timing_path):
df = pd.read_csv(timing_path).astype(str)
df.columns = [c.lower() for c in df.columns]
df_dict = df.drop([0, 0]).reset_index(drop=True).to_dict(orient="list")
return df_dict
def get_series_standard(gold_standard, block_name):
return [float(x) for x in gold_standard[block_name] if x != "nan"]
def get_ratings(log):
rating_mask = ["rating" in log["Code"][i] for i in range(0, log.shape[0])]
df = pd.DataFrame(
{
"onset": log["Time"].loc[rating_mask].values,
"participant_value": log.loc[rating_mask]["Code"].values,
"event_type": "button_press",
"duration": 0,
}
)
# Pull rating value from formatted string
df["participant_value"] = df["participant_value"].str.strip().str[-1]
return df
def combine_dfs(blocks, ratings):
# combines the block rows with the ratings rows and sorts them
combo = blocks.append(ratings).sort_values("onset").reset_index(drop=True)
mask = pd.notnull(combo["trial_type"])
combo["space_b4_prev"] = combo["onset"].diff(periods=1)
combo["first_button_press"] = combo["duration"].shift() > 0
combo2 = combo.drop(
combo[
(combo["space_b4_prev"] < 1000)
& (combo["first_button_press"] == True)
].index
).reset_index(drop=True)
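    # In effect, an event arriving less than 1000 log-time units after a block (video) onset
    # is dropped as a press carried over from the previous trial; how long that window is in
    # real time depends on the log's clock resolution (an assumption not fixed here).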
mask = pd.notnull(combo2["trial_type"])
block_start_locs = combo2[mask].index.values
last_block = combo2.iloc[block_start_locs[len(block_start_locs) - 1]]
end_row = {
"onset": last_block.end,
"rating_duration": 0,
"event_type": "last_row",
"duration": 0,
"participant_value": last_block.participant_value,
}
combo2 = combo2.append(end_row, ignore_index=True).reset_index(drop=True)
    mask = pd.notnull(combo2["trial_type"])
#%%
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import numpy.random as random
import gzip
import csv
import connectome_tools.process_matrix as pm
import connectome_tools.process_graph as pg
import connectome_tools.celltype as ct
from tqdm import tqdm
from joblib import Parallel, delayed
import networkx as nx
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
# load adjacency matrix, graphs, and pairs
adj = pm.Promat.pull_adj('ad', subgraph='brain and accessory')
pairs = pm.Promat.get_pairs()
ad_edges = pd.read_csv('data/edges_threshold/ad_all-paired-edges.csv', index_col=0)
ad_edges_split = pd.read_csv('data/edges_threshold/pairwise-threshold_ad_all-edges.csv', index_col=0)
graph = pg.Analyze_Nx_G(ad_edges)
graph_split = pg.Analyze_Nx_G(ad_edges_split, split_pairs=True)
pairs = pm.Promat.get_pairs()
# %%
# calculate shortest paths
dVNC_pair_ids = list(pm.Promat.load_pairs_from_annotation('mw dVNC', pairs, return_type='all_pair_sorted').leftid)
dSEZ_pair_ids = list(pm.Promat.load_pairs_from_annotation('mw dSEZ', pairs, return_type='all_pair_sorted').leftid)
RGN_pair_ids = list(pm.Promat.load_pairs_from_annotation('mw RGN', pairs, return_type='all_pair_sorted').leftid)
target_names = ['dVNC', 'dSEZ', 'RGN']
targets = [dVNC_pair_ids, dSEZ_pair_ids, RGN_pair_ids]
sensories_names = ['olfactory', 'gustatory-external', 'gustatory-pharyngeal', 'enteric', 'thermo-warm', 'thermo-cold', 'visual', 'noci', 'mechano-Ch', 'mechano-II/III', 'proprio', 'respiratory']
sensories_skids = [ct.Celltype_Analyzer.get_skids_from_meta_annotation(f'mw {name}') for name in sensories_names]
sensories_pair_ids = [pm.Promat.load_pairs_from_annotation(annot='', pairList=pairs, return_type='all_pair_ids', skids=celltype, use_skids=True) for celltype in sensories_skids]
all_sensories = [x for sublist in sensories_pair_ids for x in sublist]
all_sensories = list(np.intersect1d(all_sensories, graph.G.nodes))
dVNC_pair_ids = list(np.intersect1d(dVNC_pair_ids, graph.G.nodes))
cutoff=10
shortest_paths = []
for i in range(len(all_sensories)):
sens_shortest_paths = []
for j in range(len(dVNC_pair_ids)):
try:
shortest_path = nx.shortest_path(graph.G, all_sensories[i], dVNC_pair_ids[j])
sens_shortest_paths.append(shortest_path)
except:
print(f'probably no path exists from {all_sensories[i]}-{dVNC_pair_ids[j]}')
shortest_paths.append(sens_shortest_paths)
all_shortest_paths = [x for sublist in shortest_paths for x in sublist]
# %%
# calculate crossing per path
graph_crossings = pg.Prograph.crossing_counts(graph.G, all_shortest_paths)
control_hists = []
total_paths = len(graph_crossings)
binwidth = 1
x_range = list(range(0, 7))
data = graph_crossings
bins = np.arange(min(data), max(data) + binwidth + 0.5) - 0.5
hist = np.histogram(data, bins=bins)
for hist_pair in zip(hist[0], hist[0]/total_paths, [x for x in range(len(hist[0]))], ['control']*len(hist[0]), [0]*len(hist[0])):
control_hists.append(hist_pair)
control_hists = pd.DataFrame(control_hists, columns=['count', 'fraction', 'bin', 'condition', 'repeat'])
from datetime import date
from datetime import datetime
from time import strptime
import logging
import numpy as np #type: ignore
import pandas as pd #type: ignore
from pandas.api.types import is_numeric_dtype
from pandas.api.types import is_timedelta64_dtype
import re
from typing import (
Any,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Pattern,
Set,
Tuple,
Union,
)
logger = logging.getLogger(__name__)
# add_xl_formula() {{{1
def add_xl_formula(df: pd.DataFrame,
column_name: str = 'xl_calc',
formula: str = '=CONCATENATE(A{row}, B{row}, C{row})',
offset: int = 2) -> pd.DataFrame:
'''add Excel (xl) formula column
Parameters
----------
df
pandas dataframe
column_name
the column name to be associated with the column formula values, default
'xl_calc'
formula
Excel formula to be applied. As an example:
.. code-block::
'=CONCATENATE(A{row}, B{row}, C{row})'
where {row} is the defined replacement variable which will be replaced
with actual individual row value.
offset
starting row value, default = 2 (resultant xl sheet includes headers)
Examples
--------
.. code-block::
formula = '=CONCATENATE(A{row}, B{row}, C{row})'
add_xl_formula(df, column_name='X7', formula=formula)
Returns
-------
pandas dataframe
'''
col_values = []
for x in range(offset, df.shape[0] + offset):
repl_str = re.sub('{ROW}', str(x), string=formula, flags=re.I)
col_values.append(repl_str)
df[column_name] = col_values
return df
# duration {{{1
def duration(s1: pd.Series,
s2: pd.Series = None,
unit: Union[str, None] = None,
round: Union[bool, int] = 2,
freq: str = 'd') -> pd.Series:
''' calculate duration between two columns (series)
Parameters
----------
s1
'from' datetime series
s2
'to' datetime series.
Default None. If None, defaults to today.
    unit
default None - returns timedelta in days
'd' - days as an integer,
'years' (based on 365.25 days per year),
'months' (based on 30 day month)
Other possible options are:
- ‘W’, ‘D’, ‘T’, ‘S’, ‘L’, ‘U’, or ‘N’
- ‘days’ or ‘day’
- ‘hours’, ‘hour’, ‘hr’, or ‘h’
- ‘minutes’, ‘minute’, ‘min’, or ‘m’
- ‘seconds’, ‘second’, or ‘sec’
- ‘milliseconds’, ‘millisecond’, ‘millis’, or ‘milli’
- ‘microseconds’, ‘microsecond’, ‘micros’, or ‘micro’-
- ‘nanoseconds’, ‘nanosecond’, ‘nanos’, ‘nano’, or ‘ns’.
check out pandas
`timedelta object <https://pandas.pydata.org/docs/reference/api/pandas.Timedelta.html>`_
for details.
round
Default False. If duration result is an integer and this
parameter contains a positive integer, the result is round to this
decimal precision.
freq
Default is 'd'(days). If the duration result is a pd.Timedelta dtype,
the value can be 'rounded' using this frequency parameter.
Must be a fixed frequency like 'S' (second) not 'ME' (month end).
For a list of valid values, check out
`pandas offset aliases <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
Returns
-------
series
if unit is None - series is of data type timedelta64[ns]
otherwise series of type int.
Examples
--------
.. code-block::
%%piper
sample_data()
>> select(['-countries', '-regions', '-ids', '-values_1', '-values_2'])
>> assign(new_date_col=pd.to_datetime('2018-01-01'))
>> assign(duration = lambda x: duration(x.new_date_col, x.order_dates, unit='months'))
>> assign(duration_dates_age = lambda x: duration(x['dates']))
>> head(tablefmt='plain')
dates rder_dates new_date_col duration duration_dates_age
0 2020-01-01 2020-01-07 2018-01-01 25 452 days
1 2020-01-02 2020-01-08 2018-01-01 25 451 days
2 2020-01-03 2020-01-09 2018-01-01 25 450 days
3 2020-01-04 2020-01-10 2018-01-01 25 449 days
'''
if s2 is None:
s2 = datetime.today()
if unit is None:
result = s2 - s1
elif unit == 'years':
result = ((s2 - s1) / pd.Timedelta(365.25, 'd'))
elif unit == 'months':
result = ((s2 - s1) / pd.Timedelta(30, 'd'))
else:
        result = ((s2 - s1)) / pd.Timedelta(1, unit)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from mars.dataframe.datasource.dataframe import from_pandas
from mars.dataframe.datasource.series import from_pandas as series_from_pandas
from mars.dataframe.merge import concat
from mars.dataframe.utils import sort_dataframe_inplace
def test_merge(setup):
df1 = pd.DataFrame(np.arange(20).reshape((4, 5)) + 1, columns=['a', 'b', 'c', 'd', 'e'])
df2 = pd.DataFrame(np.arange(20).reshape((5, 4)) + 1, columns=['a', 'b', 'x', 'y'])
df3 = df1.copy()
df3.index = pd.RangeIndex(2, 6, name='index')
df4 = df1.copy()
df4.index = pd.MultiIndex.from_tuples([(i, i + 1) for i in range(4)], names=['i1', 'i2'])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
mdf3 = from_pandas(df3, chunk_size=3)
mdf4 = from_pandas(df4, chunk_size=2)
# Note [Index of Merge]
#
# When `left_index` and `right_index` of `merge` is both false, pandas will generate an RangeIndex to
# the final result dataframe.
#
# We chunked the `left` and `right` dataframe, thus every result chunk will have its own RangeIndex.
    # When they are concatenated we don't generate a new RangeIndex for the result, thus we cannot obtain the
# same index value with pandas. But we guarantee that the content of dataframe is correct.
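    # As a concrete illustration (not part of the test itself), a chunked result can be made
    # comparable to the pandas result by sorting on a key column and discarding the index, e.g.
    #   result0.sort_values('a').reset_index(drop=True)
    # which is the kind of normalisation sort_dataframe_inplace is used for below.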
# merge on index
expected0 = df1.merge(df2)
jdf0 = mdf1.merge(mdf2)
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected0, 0), sort_dataframe_inplace(result0, 0))
# merge on left index and `right_on`
expected1 = df1.merge(df2, how='left', right_on='x', left_index=True)
jdf1 = mdf1.merge(mdf2, how='left', right_on='x', left_index=True)
result1 = jdf1.execute().fetch()
expected1.set_index('a_x', inplace=True)
result1.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected1, 0), sort_dataframe_inplace(result1, 0))
# merge on `left_on` and right index
expected2 = df1.merge(df2, how='right', left_on='a', right_index=True)
jdf2 = mdf1.merge(mdf2, how='right', left_on='a', right_index=True)
result2 = jdf2.execute().fetch()
expected2.set_index('a', inplace=True)
result2.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected2, 0), sort_dataframe_inplace(result2, 0))
# merge on `left_on` and `right_on`
expected3 = df1.merge(df2, how='left', left_on='a', right_on='x')
jdf3 = mdf1.merge(mdf2, how='left', left_on='a', right_on='x')
result3 = jdf3.execute().fetch()
expected3.set_index('a_x', inplace=True)
result3.set_index('a_x', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected3, 0), sort_dataframe_inplace(result3, 0))
# merge on `on`
expected4 = df1.merge(df2, how='right', on='a')
jdf4 = mdf1.merge(mdf2, how='right', on='a')
result4 = jdf4.execute().fetch()
expected4.set_index('a', inplace=True)
result4.set_index('a', inplace=True)
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected4, 0), sort_dataframe_inplace(result4, 0))
# merge on multiple columns
expected5 = df1.merge(df2, how='inner', on=['a', 'b'])
jdf5 = mdf1.merge(mdf2, how='inner', on=['a', 'b'])
result5 = jdf5.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected5, 0), sort_dataframe_inplace(result5, 0))
# merge when some on is index
expected6 = df3.merge(df2, how='inner', left_on='index', right_on='a')
jdf6 = mdf3.merge(mdf2, how='inner', left_on='index', right_on='a')
result6 = jdf6.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected6, 0), sort_dataframe_inplace(result6, 0))
# merge when on is in MultiIndex
expected7 = df4.merge(df2, how='inner', left_on='i1', right_on='a')
jdf7 = mdf4.merge(mdf2, how='inner', left_on='i1', right_on='a')
result7 = jdf7.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected7, 0), sort_dataframe_inplace(result7, 0))
# merge when on is in MultiIndex, and on not in index
expected8 = df4.merge(df2, how='inner', on=['a', 'b'])
jdf8 = mdf4.merge(mdf2, how='inner', on=['a', 'b'])
result8 = jdf8.execute().fetch()
pd.testing.assert_frame_equal(sort_dataframe_inplace(expected8, 0), sort_dataframe_inplace(result8, 0))
def test_join(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], index=['a1', 'a2', 'a3'])
df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], index=['a1', 'b2', 'b3']) + 1
df2 = pd.concat([df2, df2 + 1])
mdf1 = from_pandas(df1, chunk_size=2)
mdf2 = from_pandas(df2, chunk_size=2)
# default `how`
expected0 = df1.join(df2, lsuffix='l_', rsuffix='r_')
jdf0 = mdf1.join(mdf2, lsuffix='l_', rsuffix='r_')
result0 = jdf0.execute().fetch()
pd.testing.assert_frame_equal(expected0.sort_index(), result0.sort_index())
# how = 'left'
expected1 = df1.join(df2, how='left', lsuffix='l_', rsuffix='r_')
jdf1 = mdf1.join(mdf2, how='left', lsuffix='l_', rsuffix='r_')
result1 = jdf1.execute().fetch()
pd.testing.assert_frame_equal(expected1.sort_index(), result1.sort_index())
# how = 'right'
expected2 = df1.join(df2, how='right', lsuffix='l_', rsuffix='r_')
jdf2 = mdf1.join(mdf2, how='right', lsuffix='l_', rsuffix='r_')
result2 = jdf2.execute().fetch()
pd.testing.assert_frame_equal(expected2.sort_index(), result2.sort_index())
# how = 'inner'
expected3 = df1.join(df2, how='inner', lsuffix='l_', rsuffix='r_')
jdf3 = mdf1.join(mdf2, how='inner', lsuffix='l_', rsuffix='r_')
result3 = jdf3.execute().fetch()
pd.testing.assert_frame_equal(expected3.sort_index(), result3.sort_index())
# how = 'outer'
expected4 = df1.join(df2, how='outer', lsuffix='l_', rsuffix='r_')
jdf4 = mdf1.join(mdf2, how='outer', lsuffix='l_', rsuffix='r_')
result4 = jdf4.execute().fetch()
pd.testing.assert_frame_equal(expected4.sort_index(), result4.sort_index())
def test_join_on(setup):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]], columns=['a1', 'a2', 'a3'])
    df2 = pd.DataFrame([[1, 2, 3], [1, 5, 6], [7, 8, 9]], columns=['a1', 'b2', 'b3'])
import os
import glob
import pandas as pd
def computeMACD(price):
exp1 = price.ewm(span=12, adjust=False).mean()
exp2 = price.ewm(span=26, adjust=False).mean()
macd = exp1 - exp2
return macd
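# Illustrative usage of computeMACD (a sketch only; `close` is an assumed example Series of
# chronologically sorted closing prices, and the 9-period signal line is a common companion
# indicator that is not part of this script):
#   close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.1])
#   macd = computeMACD(close)
#   signal = macd.ewm(span=9, adjust=False).mean()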
#########################################################
# Compute Relative Strength Index
# BE AWARE THAT PRICE HAS TO BE SORTED CHRONOLOGICALLY
#########################################################
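# For reference, the standard Wilder-style definition this routine approximates is
#   RS  = average gain / average loss over the lookback window
#   RSI = 100 - 100 / (1 + RS)
# with RSI reported as 0 here whenever a window has no gains or no losses
# (the ZeroDivisionError fallbacks below).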
def computeRSI(price, days=14):
price = list(price)
# determine if the price is higher or lower than the last one
price_diff = [0] + [p - price[i - 1]
for i, p in enumerate(price[1:])]
price_diff = pd.Series(price_diff)
diff_rolling_window = price_diff.rolling(window=days)
# Compute mean of bullish and bearish days
RSI = []
for i, period in enumerate(list(diff_rolling_window)):
bulish_days = []
bearish_days = []
for j, day in enumerate(list(period)):
if day > 0: # bullish
bulish_days.append(day)
elif day <= 0:
bearish_days.append(day)
try:
avg_gain = sum(bulish_days) / len(bulish_days)
except ZeroDivisionError:
avg_gain = 0
try:
avg_loss = sum(bearish_days) / len(bearish_days)
except ZeroDivisionError:
avg_loss = 0
avg_gain = avg_gain / days
avg_loss = abs(avg_loss / days)
try:
RSI.append(100 - (100 / (1 + (avg_gain / avg_loss))))
except ZeroDivisionError:
RSI.append(0)
return RSI
def main():
init_folder = os.getcwd() + '\\data'
final_folder = os.getcwd() + '\\data_clean'
if not os.path.exists(final_folder):
os.mkdir(final_folder)
for f in glob.glob(init_folder + '\\*.csv'):
name = f.split('\\')[-1]
print(f"Processing {name}...")
        df = pd.read_csv(f)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 09:02:55 2019
@author: 67135099
"""
##reads all the files in the current working directory, make sure only input files are in folder
import os
import pandas as pd
os.chdir("D:/EEC_canadian_hourly/ccrp.tor.ec.gc.ca/NAS_ClimateData_FlatFiles/HLY/HLY01/1953")
files = [ f for f in os.listdir( os.curdir ) if os.path.isfile(f) ]
files
firstFile = files[0]
firstFile
####################################################################################################
#Now we can combine all of the data from all of the files into a single data frame
giantListOfDictionaries = []
for currentFile in files:
with open(currentFile, "r") as theFirstFile:
for line in theFirstFile:
field1 = line[0:7]
field2 = line[7:11]
field3 = line[11:13]
field4 = line[13:15]
field5 = line[15:18]
field6 = line[18:25]
field7 = line[25:32]
field8 = line[32:39]
field9 = line[39:46]
field10 = line[46:53]
field11= line[53:60]
field12 = line[60:67]
field13 = line[67:74]
field14 = line[74:81]
field15 = line[81:88]
field16 = line[88:95]
field17 = line[95:102]
field18 = line[102:109]
field19 = line[109:116]
field20 = line[116:123]
field21 = line[123:130]
field22 = line[130:137]
field23 = line[137:144]
field24= line[144:151]
field25 = line[151:158]
field26 = line[158:165]
field27 = line[165:172]
field28 = line[172:179]
field29 = line[179:186]
currentDictionary = {"File_name": currentFile,'Station_ID': field1,
"Year": field2,"Month": field3,"Day": field4,"Variable": field5,"1":field6,
"2":field7,"3":field8,"4":field9,"5":field10,"6":field11,"7":field12,"8":field13
,"9":field14,"10":field15,"11":field16,"12":field17,"13":field18,"14":field19,"15":field20
,"16":field21,"17":field22,"18":field23,"19":field24,"20":field25,"21":field26,"22":field27
,"23":field28,"00":field29}
giantListOfDictionaries.append(currentDictionary)
#create a dataframe from dictionary
df = pd.DataFrame(giantListOfDictionaries)
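# A more compact alternative to the manual slicing above would be pandas.read_fwf with
# explicit column specs (a sketch only -- the colspecs are assumed to mirror those slices):
#   colspecs = [(0, 7), (7, 11), (11, 13), (13, 15), (15, 18)] + \
#              [(18 + 7*i, 25 + 7*i) for i in range(24)]
#   pd.read_fwf(currentFile, colspecs=colspecs, header=None)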
#Delete the unwanted first column File name
df=df.drop("File_name",axis=1)
df1=df.loc[df["Variable"] == "073"]
##add_source_id
df1["Source_ID"] = "341"
df1["Elevation"] = "-9999"
df1 = df1.reindex(columns=['Station_ID', "Year","Month","Day","Variable","1",
"2","3","4","5","6","7","8","9","10","11","12","13","14","15",
"16","17","18","19","20","21","22","23","00"])
df1["Elevation"] = "-9999"
#merge with station details files
#######################
df2=pd.read_csv("Stations.csv")
df1 = df1.astype(str)
df2 = df2.astype(str)
#match up columns in output with station name files and add station names NB diff
#sources for same station with diff ids
result = df2.merge(df1, on=['Station_ID'])
result['Station_ID2'] = result['Station_ID']
del result ["Station_ID"]
result= result.rename(columns=({'Station_ID2':'Station_ID'}))
result["Source_ID"] = "341"
result["Elevation"] = "-9999"
result = result.replace({"-99999M":"",})
#result.to_csv("mslp_1953.csv",index=False)
################################################################################
##write out separate comma-delimited files by station id to the working directory, named by station_id + variable name + source id; need to change with each new source
os.chdir(r"D:/EEC_canadian_hourly/2000_10/mslp")
cats = sorted(result['Station_ID'].unique())
for cat in cats:
outfilename = cat + "_sea_level_pressure_2009.csv"
print(outfilename)
result[result["Station_ID"] == cat].to_csv(outfilename,sep=',',index=False)
###########################################################station_pressure
df1=df.loc[df["Variable"] == "077"]
##add_source_id
df1["Source_ID"] = "341"
df1["Elevation"] = "-9999"
df1 = df1.reindex(columns=['Station_ID', "Year","Month","Day","Variable","1",
"2","3","4","5","6","7","8","9","10","11","12","13","14","15",
"16","17","18","19","20","21","22","23","00"])
df1["Elevation"] = "-9999"
#merge with station details files
df2=pd.read_csv("D:/EEC_canadian_hourly/ccrp.tor.ec.gc.ca/NAS_ClimateData_FlatFiles/HLY/HLY01/1953/Stations.csv")
df1 = df1.astype(str)
df2 = df2.astype(str)
#match up columns in output with station name files and add station names NB diff
#sources for same station with diff ids
result = df2.merge(df1, on=['Station_ID'])
result['Station_ID2'] = result['Station_ID']
del result ["Station_ID"]
result= result.rename(columns=({'Station_ID2':'Station_ID'}))
result["Source_ID"] = "341"
result["Elevation"] = "-9999"
result = result.replace({"-99999M":"",})
##write out separate comma-delimited files by station id to the working directory, named by station_id + variable name + source id; need to change with each new source
os.chdir(r"D:/EEC_canadian_hourly/2000_10/slp")
cats = sorted(result['Station_ID'].unique())
for cat in cats:
outfilename = cat + "_station_pressure_2009.csv"
print(outfilename)
result[result["Station_ID"] == cat].to_csv(outfilename,sep=',',index=False)
###########################################################wind_speed
df1=df.loc[df["Variable"] == "076"]
##add_source_id
df1["Source_ID"] = "341"
df1["Elevation"] = "-9999"
df1 = df1.reindex(columns=['Station_ID', "Year","Month","Day","Variable","1",
"2","3","4","5","6","7","8","9","10","11","12","13","14","15",
"16","17","18","19","20","21","22","23","00"])
df1["Elevation"] = "-9999"
#merge with station details files
df2=pd.read_csv("D:/EEC_canadian_hourly/ccrp.tor.ec.gc.ca/NAS_ClimateData_FlatFiles/HLY/HLY01/1953/Stations.csv")
df1 = df1.astype(str)
df2 = df2.astype(str)
#match up columns in output with station name files and add station names NB diff
#sources for same station with diff ids
result = df2.merge(df1, on=['Station_ID'])
result['Station_ID2'] = result['Station_ID']
del result ["Station_ID"]
result= result.rename(columns=({'Station_ID2':'Station_ID'}))
result["Source_ID"] = "341"
result["Elevation"] = "-9999"
result = result.replace({"-99999M":"",})
##write out separate comma-delimited files by station id to the working directory, named by station_id + variable name + source id; need to change with each new source
os.chdir(r"D:/EEC_canadian_hourly/2000_10/ws")
cats = sorted(result['Station_ID'].unique())
for cat in cats:
outfilename = cat + "_wind_speed_2009.csv"
print(outfilename)
result[result["Station_ID"] == cat].to_csv(outfilename,sep=',',index=False)
###########################################################dew_poeint_temperature
df1=df.loc[df["Variable"] == "074"]
##add_source_id
df1["Source_ID"] = "341"
df1["Elevation"] = "-9999"
df1 = df1.reindex(columns=['Station_ID', "Year","Month","Day","Variable","1",
"2","3","4","5","6","7","8","9","10","11","12","13","14","15",
"16","17","18","19","20","21","22","23","00"])
df1["Elevation"] = "-9999"
#merge with station details files
df2=pd.read_csv("D:/EEC_canadian_hourly/ccrp.tor.ec.gc.ca/NAS_ClimateData_FlatFiles/HLY/HLY01/1953/Stations.csv")
df1 = df1.astype(str)
df2 = df2.astype(str)
#match up columns in output with station name files and add station names NB diff
#sources for same station with diff ids
result = df2.merge(df1, on=['Station_ID'])
result['Station_ID2'] = result['Station_ID']
del result ["Station_ID"]
result= result.rename(columns=({'Station_ID2':'Station_ID'}))
result["Source_ID"] = "341"
result["Elevation"] = "-9999"
result = result.replace({"-99999M":"",})
##write out separate comma-delimited files by station id to the working directory, named by station_id + variable name + source id; need to change with each new source
os.chdir(r"D:/EEC_canadian_hourly/2000_10/dpt")
cats = sorted(result['Station_ID'].unique())
for cat in cats:
outfilename = cat + "_dew_point_temperature_2009.csv"
print(outfilename)
result[result["Station_ID"] == cat].to_csv(outfilename,sep=',',index=False)
###########################################################temperature
df1=df.loc[df["Variable"] == "078"]
##add_source_id
df1["Source_ID"] = "341"
df1["Elevation"] = "-9999"
df1 = df1.reindex(columns=['Station_ID', "Year","Month","Day","Variable","1",
"2","3","4","5","6","7","8","9","10","11","12","13","14","15",
"16","17","18","19","20","21","22","23","00"])
df1["Elevation"] = "-9999"
#merge with station details files
df2 = pd.read_csv("D:/EEC_canadian_hourly/ccrp.tor.ec.gc.ca/NAS_ClimateData_FlatFiles/HLY/HLY01/1953/Stations.csv")
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 15 18:21:53 2019
@author: Hassan
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
from sklearn import linear_model
from sklearn.tree import DecisionTreeRegressor
da=pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\Number 1\\All metrics.csv")
Estimated = pd.read_csv("F:\\Projects\\Master\\Statistical learning\\project\\Number 1\\Estimated range.csv")
"""Scraping and parsing amazon"""
__author__ = 'thor'
import os
from ut.util.importing import get_environment_variable
import ut as ms
import ut.dacc.mong.util
import pandas as pd
import numpy as np
import requests
import re
from BeautifulSoup import BeautifulSoup as bs3_BeautifulSoup
from datetime import timedelta
from datetime import datetime
from pymongo import MongoClient
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as dates
from ut.serialize.s3 import S3
import tempfile
from ut.viz.util import insert_nans_in_x_and_y_when_there_is_a_gap_in_x
import pylab
class Amazon(object):
url_template = dict()
url_template['product_page'] = 'http://www.amazon.{country}/dp/{asin}/'
url_template['product_reviews'] = 'http://www.amazon.{country}/product-reviews/{asin}/'
regexp = dict()
regexp['nreviews_re'] = {'com': re.compile('\d[\d,]*(?= customer review)'),
'co.uk': re.compile('\d[\d,]*(?= customer review)'),
'in': re.compile('\d[\d,]*(?= customer review)'),
'de': re.compile('\d[\d\.]*(?= Kundenrezens\w\w)')}
regexp['no_reviews_re'] = {'com': re.compile('no customer reviews'),
'co.uk': re.compile('no customer reviews'),
'in': re.compile('no customer reviews'),
'de': re.compile('Noch keine Kundenrezensionen')}
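    # Illustrative matches (the page wording is an assumption): nreviews_re['com'] pulls
    # "1,234" out of "1,234 customer reviews"; the 'de' pattern pulls "1.234" out of
    # "1.234 Kundenrezensionen".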
# regexp['average_rating_re'] = {'com': re.compile('')}
default = dict()
default['country'] = 'com'
# default['requests_kwargs'] = {}
default['requests_kwargs'] = {
'proxies': {'http': 'http://us.proxymesh.com:31280'},
'auth': requests.auth.HTTPProxyAuth(get_environment_variable('PROXYMESH_USER'),
get_environment_variable('PROXYMESH_PASS'))
}
@classmethod
def url(cls, what='product_page', **kwargs):
kwargs = dict(Amazon.default, **kwargs)
return cls.url_template[what].format(**kwargs)
@classmethod
def slurp(cls, what='product_page', **kwargs):
kwargs = dict(Amazon.default, **kwargs)
r = requests.get(Amazon.url(what=what, **kwargs), **Amazon.default['requests_kwargs'])
if r.status_code == 200:
return r.text
else: # try again and return no matter what
r = requests.get(Amazon.url(what=what, **kwargs), **Amazon.default['requests_kwargs'])
return r.text
# @classmethod
# def get_dynamic_book_info(cls, asin, **kwargs):
# html = Amazon.slurp(what='product_page', **kwargs)
# b = bs3_BeautifulSoup(b)
@classmethod
def get_info(cls, asin, country='co.uk', **kwargs):
info = {'date': datetime.now()}
info = dict(info, **{'sales_ranks': cls.get_sales_rank(asin, country='co.uk', **kwargs)})
# info = dict(info, **{'num_of_reviews': cls.get_number_of_reviews(asin, country='co.uk', **kwargs)})
return info
@classmethod
def get_sales_rank(cls, **kwargs):
html = Amazon.slurp(what='product_page', **kwargs)
sales_rank = [Amazon.parse_sales_rank(html, **kwargs)]
sales_rank += Amazon.parse_sales_sub_rank(html, **kwargs)
return sales_rank
@classmethod
def parse_product_title(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
return b.find('span', attrs={'id': 'productTitle'}).text
@classmethod
def parse_sales_rank(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
t = b.find('li', attrs={'id': re.compile('SalesRank')})
sales_rank_re = re.compile('(\d[\d,]+) in ([\w\ ]+)')
tt = sales_rank_re.findall(t.text)
return {'sales_rank': int(re.compile('\D').sub('', tt[0][0])),
'sales_rank_category': tt[0][1].strip(' ')}
@classmethod
def parse_sales_sub_rank(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
t = b.find('li', attrs={'id': re.compile('SalesRank')})
tt = t.findAll('li', 'zg_hrsr_item')
sales_sub_rank = list()
for tti in tt:
d = dict()
d['sales_rank'] = int(re.compile('\D').sub('', tti.find('span', 'zg_hrsr_rank').text))
ttt = tti.find('span', 'zg_hrsr_ladder')
ttt = ttt.text.split(' ')[1]
d['sales_rank_category'] = ttt.split('>')
sales_sub_rank.append(d)
return sales_sub_rank
@classmethod
def parse_avg_rating(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
t = b.find('span', 'reviewCountTextLinkedHistogram')
return float(re.compile('[\d\.]+').findall(t['title'])[0])
@classmethod
def parse_product_title(cls, b, **kwargs):
if not isinstance(b, bs3_BeautifulSoup):
b = bs3_BeautifulSoup(b)
t = b.find('div', attrs={'id': 'title'})
return t.find('span', attrs={'id': 'productTitle'}).text
@staticmethod
def test_rating_scrape_with_vanessas_book():
        html = Amazon.slurp(what='product_page', country='co.uk', asin='1857886127')
@staticmethod
def get_number_of_reviews(asin, country, **kwargs):
url = 'http://www.amazon.{country}/product-reviews/{asin}'.format(country=country, asin=asin)
html = requests.get(url).text
try:
return int(re.compile('\D').sub('', Amazon.regexp['nreviews_re'][country].search(html).group(0)))
except Exception:
if Amazon.regexp['no_reviews_re'][country].search(html):
return 0
else:
return None # to distinguish from 0, and handle more cases if necessary
class AmazonBookWatch(object):
default = dict()
default['product_list'] = [
{'title': 'The Nanologues', 'asin': '9350095173'},
{'title': 'Never mind the bullocks', 'asin': '1857886127'},
{'title': 'The Other Side of Paradise', 'asin': '1580055311'}
]
default['watch_list'] = [
{'title': 'The Nanologues', 'asin': '9350095173', 'country': 'in'},
{'title': 'The Nanologues', 'asin': '9350095173', 'country': 'co.uk'},
{'title': 'The Nanologues', 'asin': '9350095173', 'country': 'com'},
{'title': 'Never mind the bullocks', 'asin': '1857886127', 'country': 'in'},
{'title': 'Never mind the bullocks', 'asin': '1857886127', 'country': 'co.uk'},
{'title': 'Never mind the bullocks', 'asin': '1857886127', 'country': 'com'},
{'title': 'The Other Side of Paradise', 'asin': '1580055311', 'country': 'com'},
{'title': 'The Other Side of Paradise', 'asin': '1580055311', 'country': 'co.uk'},
{'title': "Heaven's Harlots (Paperback)", 'asin': '0688170129', 'country': 'com'},
{'title': "Heaven's Harlots (Hardcover)", 'asin': '0688155049', 'country': 'com'},
{'title': "Women on Ice", 'asin': '0813554594', 'country': 'com'}
]
default['frequency_in_hours'] = 1
default['max_date_ticks'] = 200
default['stats_num_of_days'] = 1
default['figheight'] = 3
default['figwidth'] = 14
default['linewidth'] = 3
default['tick_font_size'] = 13
default['label_fontsize'] = 13
default['title_fontsize'] = 15
default['line_style'] = '-bo'
default['facecolor'] = 'blue'
default['save_format'] = 'png'
default['dpi'] = 40
default['book_info_html_template'] = '''<hr>
<h3>{book_title} - {country} - {num_of_reviews} reviews </h3>
'''
default['category_html'] = '<img style="box-shadow: 3px 3px 5px 6px #ccc;" src={image_url}>'
db = MongoClient()['misc']['book_watch']
def __init__(self, **kwargs):
self.s3 = S3(bucket_name='public-ut-images', access_key='ut')
attribute_name = 'product_list'
setattr(self, attribute_name, kwargs.get(attribute_name, None) or AmazonBookWatch.default[attribute_name])
attribute_name = 'watch_list'
setattr(self, attribute_name, kwargs.get(attribute_name, None) or AmazonBookWatch.default[attribute_name])
def asin_of_title(self, title):
the_map = {k: v for k, v in zip([x['title'] for x in self.product_list], [x['asin'] for x in self.product_list])}
return the_map[title]
def get_book_statuses(self):
now = datetime.now()
info_list = list()
for book in self.watch_list:
try:
info = dict({'date': now}, **book)
info = dict(info, **{'sale_ranks': Amazon.get_sales_rank(**book)})
info = dict(info, **{'num_of_reviews': Amazon.get_number_of_reviews(**book)})
info_list.append(info)
except Exception:
continue
return info_list
@staticmethod
def cursor_to_df(cursor):
d = ms.dacc.mong.util.to_df(cursor, 'sale_ranks')
d = process_sales_rank_category(d)
return d
@staticmethod
def get_min_max_sales_rank_dates(book_info):
cumul = list()
for x in list(book_info['sales_rank'].values()):
try:
cumul += x['data']['date'].tolist()
except Exception:
raise
return [np.min(cumul), np.max(cumul)]
def mk_book_info(self, title, country, **kwargs):
book_info = dict()
kwargs = dict(kwargs, **self.default)
d = AmazonBookWatch.cursor_to_df(self.db.find(spec={'title': title, 'country': country})
.sort([('_id', -1)]).limit(kwargs['max_date_ticks']))
book_info['num_reviews'] = np.max(d['num_of_reviews'])
book_info['sales_rank'] = dict()
d = d[['date', 'sales_rank_category', 'sales_rank_subcategory', 'sales_rank']]
categories = np.unique(d['sales_rank_category'])
for c in categories:
dd = d[d['sales_rank_category'] == c].sort('date', ascending=True)
book_info['sales_rank'][c] = dict()
book_info['sales_rank'][c]['sales_rank_subcategory'] = dd['sales_rank_subcategory'].iloc[0]
dd = dd[['date', 'sales_rank']]
book_info['sales_rank'][c]['data'] = dd
ddd = dd[dd['date'] > datetime.now() - timedelta(days=kwargs['stats_num_of_days'])]
book_info['sales_rank'][c]['rank_stats'] = pd.DataFrame([{
'hi_rank': np.min(ddd['sales_rank']),
'mean_rank': np.round(np.mean(ddd['sales_rank'])),
'lo_rank': np.max(ddd['sales_rank'])
}])
book_info['sales_rank'][c]['rank_stats'] = \
book_info['sales_rank'][c]['rank_stats'][['hi_rank', 'mean_rank', 'lo_rank']]
book_info['commun_date_range'] = self.get_min_max_sales_rank_dates(book_info)
return book_info
def mk_sales_rank_plot(self, d, category='', save_filename=True, **kwargs):
kwargs = dict(kwargs, **self.default)
if isinstance(d, dict):
if 'sales_rank' in list(d.keys()):
d = d['sales_rank'][category]['data']
elif category in list(d.keys()):
d = d[category]['data']
elif 'data' in list(d.keys()):
d = d['data']
else:
raise ValueError('Your dict must have a "data" key or a %s key' % category)
d = d.sort('date')
x = [xx.to_datetime() for xx in d['date']]
y = list(d['sales_rank'])
gap_thresh = timedelta(seconds=kwargs['frequency_in_hours'] * 4.1 * 3600)
x, y = insert_nans_in_x_and_y_when_there_is_a_gap_in_x(x, y, gap_thresh=gap_thresh)
fig, ax = plt.subplots(1)
fig.set_figheight(kwargs['figheight'])
fig.set_figwidth(kwargs['figwidth'])
ax.plot(x, y, kwargs['line_style'], linewidth=kwargs['linewidth'])
commun_date_range = kwargs.get('commun_date_range', None)
if commun_date_range:
pylab.xlim(kwargs['commun_date_range'])
ax.fill_between(x, y, max(y), facecolor=kwargs['facecolor'], alpha=0.5)
# plt.ylabel('Amazon (%s) Sales Rank' % category, fontsize=kwargs['label_fontsize'])
plot_title = kwargs.get('plot_title', 'Amazon (%s) Sales Rank' % category)
plt.title(plot_title, fontsize=kwargs['title_fontsize'])
plt.tick_params(axis='y', which='major', labelsize=kwargs['tick_font_size'])
# plt.tick_params(axis='x', which='major', labelsize=kwargs['tick_font_size'])
plt.tick_params(axis='x', which='minor', labelsize=kwargs['tick_font_size'])
plt.gca().invert_yaxis()
# ax.xaxis.set_minor_locator(dates.WeekdayLocator(byweekday=(1), interval=1))
ax.xaxis.set_minor_locator(dates.DayLocator(interval=1))
ax.xaxis.set_minor_formatter(dates.DateFormatter('%a\n%d %b'))
ax.xaxis.grid(True, which="minor")
ax.yaxis.grid()
ax.xaxis.set_major_locator(dates.MonthLocator())
# ax.xaxis.set_major_formatter(dates.DateFormatter('\n\n\n%b\n%Y'))
plt.tight_layout()
if save_filename:
if isinstance(save_filename, str):
save_filename = save_filename + '.' + kwargs['save_format']
else: # save to temp file
save_filename = tempfile.NamedTemporaryFile().name
plt.savefig(save_filename, format=kwargs['save_format'], dpi=kwargs['dpi'])
return save_filename
else:
return None
def mk_book_info_html(self, title, country, **kwargs):
kwargs = dict(kwargs, **self.default)
book_info = self.mk_book_info(title, country, **kwargs)
html = kwargs['book_info_html_template'].format(
book_title=title,
country=country,
num_of_reviews=book_info['num_reviews']
)
html = html + "<br>\n"
for category in list(book_info['sales_rank'].keys()):
# make and save a graph, send to s3, and return a url for it
file_name = self.mk_sales_rank_plot(
d=book_info['sales_rank'],
category=category, save_filename=True,
commun_date_range=book_info['commun_date_range'],
plot_title='Amazon.%s (%s) Sales Rank' % (
country, book_info['sales_rank'][category]['sales_rank_subcategory']),
**kwargs
)
s3_key_name = '{title} - {country} - {category} - {date}.png'.format(
title=title,
country=country,
category=category,
date=datetime.now().strftime('%Y%m%d')
)
self.s3.dumpf(file_name, s3_key_name)
image_url = self.s3.get_http_for_key(s3_key_name)
html = html + kwargs['category_html'].format(
image_url=image_url
) + "<br>\n"
# html = html + "\n<br>"
return html
def mk_html_report(self, title_country_list=None):
title_country_list = title_country_list or [
{'title': 'Never mind the bullocks', 'country': 'co.uk'},
{'title': 'Never mind the bullocks', 'country': 'com'},
{'title': 'The Nanologues', 'country': 'in'}
]
html = ''
html += 'Stats of the last 24 hours:<br>'
d = pd.DataFrame()
for title_country in title_country_list:
title = title_country['title']
country = title_country['country']
book_info = self.mk_book_info(title=title, country=country)
for category in list(book_info['sales_rank'].keys()):
                dd = pd.concat([pd.DataFrame([{'title': title, 'country': country, 'category': category}])
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 15:45:51 2018
@author: AyushRastogi
"""
import sqlite3
import pandas as pd # data processing and csv file IO library
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns # python graphing library
plt.style.use('seaborn')
sns.set(style="white", color_codes=True)
plt.rc('figure', figsize=(10, 6))
np.set_printoptions(precision=4, suppress=True)
# plt.rcdefaults() # reset to default matplotlib parameters
import warnings #ignore unwanted messages
warnings.filterwarnings("ignore")
import os
os.path
os.getcwd() # Get the default working directory
path = r'C:\Users\AyushRastogi\OneDrive\Meetup2\Ayush_Meetup2'
os.chdir(path)
# Setting up the connections
conn = sqlite3.connect(r'C:\Users\AyushRastogi\OneDrive\Meetup2\Ayush_Meetup2\WY_Production.sqlite')
cur = conn.cursor()
# Merge the data with the fracfocus database
conn2 = sqlite3.connect(r'C:\Users\AyushRastogi\OneDrive\Meetup2\Meetup1\FracFocus.sqlite')
cur2 = conn2.cursor()
# Connections to the two databases complete
# SQL Query - Data from database converted to a dataframe
data = pd.read_sql_query(''' SELECT * FROM Production;''', conn)
print (data.head(10)) #default head() function prints 5 results
print (data.shape) # in the form of rows x columns
data.columns
data.index
data.describe() # get basic statistics for the dataset, does not include any string type elements
# Number of unique API
data.APINO.nunique() # This gives us 27,742 well records
data_FF = pd.read_sql_query(''' SELECT APINumber AS APINO, TotalBaseWaterVolume, CountyName, CountyNumber FROM FracFocusRegistry WHERE (Statenumber = 49 AND (CountyName = 'Campbell' OR CountyName = 'Converse'));''', conn2)
print (data_FF.head(10)) #default head() function prints 5 results
print (data_FF.shape) # in the form of rows x columns
data_FF.columns
data_FF.index
data_FF.APINO.nunique() # This gives us 654 well records
# Look into the format in which APINumber is included in the Fracfocus Registry
data_FF['APINO']
# API Number Format Manipulation
data['StateCode'] = '4900'
data['Trail_zero'] = '0000'
data['APINO'] = data['StateCode'] + data['APINO'] + data['Trail_zero']
data['APINO']
# Merge the two dataframes based on same API
data = pd.merge(data,data_FF,on = 'APINO')
# Number of unique API - After merging the two databases on API
data.APINO.nunique() # This gives us 233 well records
## Date Manipulation - Convert the date column from string to datetime format
#data['Date'] = pd.to_datetime(data['Date'], infer_datetime_format=True, errors='ignore')
##data['Date'] = pd.to_datetime(data['Date'], errors='ignore')
## filtering the date for production after 2005
#data = data[(data['Date'] > '2005-01-01')]
# Checking if there is any NULL value in the dataset
data.isnull().sum()
data = data.dropna(axis=0, how='any') # entire row with even a single NA value will be removed - better option to filter data
# At this point we have 458,703 rows and 12 columns
# Column for Cumulative value, Groupby function can be understood as (Split, Apply Function and Combine)
# Also converting the numbers to float
data['cum_oil'] = data.groupby(['APINO'])['Oil'].apply(lambda x: x.cumsum()).astype(float)
data['cum_gas'] = data.groupby(['APINO'])['Gas'].apply(lambda x: x.cumsum()).astype(float)
data['cum_water'] = data.groupby(['APINO'])['Water'].apply(lambda x: x.cumsum()).astype(float)
data['cum_days'] = data.groupby(['APINO'])['Days'].apply(lambda x: x.cumsum()).astype(float)
# Sorting the table by APINO
data = data.sort_values(['APINO'])
data
# Now we need to add 30-60-90-180 and 365 day production
# Let's just look into the oil for now!
#df = data[['APINO', 'Date', 'Oil', 'cum_oil', 'cum_days']].astype(int)
df = data[['APINO', 'Oil', 'cum_oil', 'Days', 'cum_days']].astype(float) #Removed the 'Date' column, need to have a separate dataframe with datetime like values
df = df.reset_index()
df.index
df = df.sort_values(['index'])
df.to_csv(os.path.join(path,r'Data_Reduced.csv'))
# -----------------------------------------------------------
# Need a loop to change the dataframe name iteratively
time = 60
df['60_Interpol_OIL'] = 0.0
df['60_Interpol_OIL'].astype(str).astype(float)
for count in range(len(df['cum_oil']) - 1): # stop one row early so count+1 stays in bounds
if count>=1: # same guard as the 180-day block below, so count-1 is a valid index
if (df['cum_days'][count] <= time and df['cum_days'][count+1] > time):
df['60_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(time - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
pd.to_numeric(df['60_Interpol_OIL'], errors='coerce')
df['60_Interpol_OIL'] = df['60_Interpol_OIL'].apply(lambda x: '%.1f' % x).values.tolist()
df[df['60_Interpol_OIL'] != '0.0']
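# Worked example of the interpolation above (illustrative numbers only): if a well reports
# cum_days of 31, 59 and 92 with cum_oil of 1000, 2100 and 3300 bbl, the row with cum_days=59
# is the last one at or below 60 days, so the 60-day cumulative oil is interpolated between
# its neighbours as 1000 + (3300 - 1000)*(60 - 31)/(92 - 31) ~= 2093 bbl.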
time = 180
df['180_Interpol_OIL'] = 0.0
df['180_Interpol_OIL'].astype(str).astype(float)
for count in range(len(df['cum_oil']) - 1): # stop one row early so count+1 stays in bounds
if count>=1:
if (df['cum_days'][count] <= time and df['cum_days'][count+1] > time):
df['180_Interpol_OIL'][count] = df['cum_oil'][count-1] + ((df['cum_oil'][count+1]) - df['cum_oil'][count-1])*(time - df['cum_days'][count-1])/(df['cum_days'][count+1]-df['cum_days'][count-1])
| pd.to_numeric(df['180_Interpol_OIL'], errors='coerce') | pandas.to_numeric |
import os
from tkinter import filedialog
from tkinter import *
import pandas as pd
import datetime
# define year of measurements for naming
year = 2014
# Please navigate to folder containing measurement subfolders for specified year
root = Tk()
root.withdraw()
folder = filedialog.askdirectory()
# quick check to see all stations included in that year
#folder = "//igswztwwgszona/Gravity Data Archive/Relative Data/All American Canal/2019-05"
for folders in sorted(os.listdir(folder)):
print(folders)
name_array, time_array, corr_g = [], [], []
user_array, meter_array, date_array = [], [], []
for file in sorted(os.listdir(folder)):
abs_path = os.path.join(folder + '/' + file)
if abs_path[-3:] == 'xls' or abs_path[-3:] == 'XLS':
data_xls = pd.read_excel(abs_path, 'results', index_col=None, usecols=7, dtype='object')
yr = data_xls['Unnamed: 3'][0]
dat = datetime.datetime(int(file[0:4]), int(file[4:6]), int(file[6:8]))
user = data_xls['Unnamed: 2'][3][0]
meter1 = data_xls['Unnamed: 2'][6]
meter2 = str(data_xls['Unnamed: 3'][6])
meter = meter1 + meter2
xl = pd.ExcelFile(abs_path)
for sheet in xl.sheet_names:
if sheet not in ['results', 'tide', 'metertable'] \
and sheet.find('Sheet') == -1 \
and sheet.find('sheet') == -1:
print(sheet)
sheet_data = pd.read_excel(abs_path, sheet, index_col=False)
ctr = 1.0
for i in range(sheet_data.shape[0]):
if type(sheet_data.iloc[i, 1]) == datetime.time and not pd.isna(sheet_data.iloc[i, 10]):
time_array.append(datetime.datetime.combine(dat, sheet_data.iloc[i, 1]))
corr_g.append(sheet_data.iloc[i, 10]/1000)
name_array.append(sheet)
user_array.append(user)
meter_array.append(meter)
data_tuples = list(zip(name_array, user_array, meter_array, time_array, corr_g))
df = | pd.DataFrame(data_tuples, columns=['name', 'user', 'meter', 'date', 'corr_g']) | pandas.DataFrame |
#!/usr/bin/env python
"""Batch process all images of compound eyes and save data.
Assumes the following folder structure:
.\
|--batch_process_images.py
|--pixel_size.csv (optional) # stored pixel-to-distance conv.
|--image_001.jpg
|--image_002.jpg
|...
|--masks\
|--image_001.jpg # white sillouetting mask on black background
|--image_002.jpg
|...
|--image_001_ommatidia.jpg (outcome)
|--image_002_ommatidia.jpg (outcome)
|...
|--ommatidia_data.csv
|--_hidden_file
"""
import os
from scipy import misc
from analysis_tools import *
import pandas as pd
# Custom parameters
# if a list, must be one-to-one with images
BRIGH_PEAK = False # True assumes a bright point for every peak
HIGH_PASS = True # True adds a high-pass filter to the low-pass used in the ODA
SQUARE_LATTICE = True # True assumes only two fundamental gratings
FILE_EXTENSION = ".jpg" # assumes you're only interested in this file extension
# make dictionary to store relevant information
values = {
"filename":[], "eye_area":[], "eye_length":[], "eye_width":[],
"ommatidia_count":[], "ommatidial_diameter":[], "ommatidial_diameter_std":[],
"ommatidial_diameter_fft":[]
}
# load filenames and folders
fns = os.listdir(os.getcwd())
img_fns = [fn for fn in fns if fn.endswith(".jpg")]
img_fns = [fn for fn in img_fns if "ommatidia" not in fn] # omit outcome images
folders = [fn for fn in fns if os.path.isdir(fn)]
folders = [os.path.join(os.getcwd(), f) for f in folders]
# load the mask filenames
mask_fns = os.listdir("./masks")
mask_fns = [os.path.join(os.getcwd(), f) for f in mask_fns]
# load pixel sizes from a local file, if present
pixel_sizes = []
if os.path.exists("pixel_size.csv"):
pixel_sizes_csv = | pd.read_csv("pixel_size.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 28 11:45:57 2018
This script calculates and visualizes a Linear Discriminant Analysis (LDA) applied to the csv files recorded during
social fear experiments. First, it reads each recording, computes the z-scored values, and merges the
recordings that belong to the same experimental day.
It then merges the experimental days into a single dataset and creates a label that records on which day each row
of this dataset was recorded. After that it calculates the LDA, which allows checking whether there are clusters of frames where
the neuronal activity is similar.
The csv files have to follow a specific naming convention: 'Vmh' + 'id number of the mouse' + 'a string (A or SF)' + 'a number that
identifies the day of recording' + 'a number that identifies the recording number' + 'the string (beh)'.
@author: penna
"""
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
import os
import matplotlib.patches as patches
import numpy as np
import seaborn as sns
def reading(file_in, delimiter):
"""
Function that reads a csv file, removes the frames that are not social-contact behaviours and returns a z-scored df.
:param file_in: absolute file path of the file csv
:param delimiter: delimiter of the csv file
:return: a Dataframe that contains the Z-scored df/f values
"""
data_frame = pd.read_csv(file_in, sep=delimiter)
data_frame = data_frame.drop('Frames', 1)
indexes_to_del = []
neurons = []
# 'Attack', 'defense action', 'face the consp', 'Sniff', 'Upright', 'Sniff A/G', 'domination hands on'
# 'Attack', 'Defence', 'Face the Consp', 'Sniff', 'Upright', 'Sniff A/G', 'domination hands on', 'tail rattle', 'Avoid'
beh = ['Attack', 'Defence', 'Face the Consp', 'Sniff', 'Upright', 'Sniff A/G', 'domination hands on', 'tail rattle', 'Avoid']
for i in range(len(data_frame)):
if data_frame['Beh'][i] not in beh:
indexes_to_del.append(i)
data_frame = data_frame.drop('Beh', 1)
for el in data_frame:
neurons.append(el)
data_frame = pd.DataFrame(StandardScaler().fit_transform(data_frame), columns=neurons).drop(data_frame.index[indexes_to_del])
return data_frame
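# Example usage (added; the file name and delimiter are hypothetical but follow the naming
# convention described in the module docstring):
# df_z = reading('Vmh3SF11beh.csv', ';')
# df_z is then a z-scored dataframe with one column per neuron and one row per social-contact frame.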
def dataframe_merger_by_day(code_map):
"""
Function that merges all dataframes of the same day together.
:param code_map: dictionary containing all dataframes in the format {day: list of dfs of that day}.
:return: tupla of a list of dataframe and a list of labels
"""
dataframes = []
activities = []
for code, lists in sorted(code_map.items()):
temp_df = pd.DataFrame()
for df in lists:
df = (df.transpose().drop(df.transpose().index[minLen:])).transpose()
if len(temp_df) == 0:
temp_df = df
else:
index = df.index.tolist()
for i in range(len(index)):
index[i] = i + len(temp_df) + 1
df.index = index
temp_df = temp_df.append(df)
dataframes.append(temp_df)
activities.append(code)
return dataframes, activities
def dataframe_merger_and_column_adder(dataframes, activities, column_name):
"""
Function that merges the dataframes of all experimental days together, and adds a column that identifies rows by day.
:param dataframes: the list with all dataframes
:param activities: the list containing days of experimental conditions
:param column_name: name of the new column
:return: a tuple containing the new dataframe and a list of all columns of the new dataframe that represent neurons
"""
new_df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import gzip
from collections import Counter
path = 'cyber_application/data/'
file = 'auth_weekly_filtered.gz'
nodes = | pd.read_csv(path+'nodes_subnetwork.csv') | pandas.read_csv |
import pandas as pd
from pprint import pprint
from jellyfish import jaro_distance
import unidecode
from _Classes.PyscopusModified import ScopusModified
from _Classes.Author import Author
from _Classes.Individuals import Student, Egress
from _Classes.Indicators import Indicators
from _Funções_e_Valores.verify_authors import search_authors_list, treat_exceptions
from _Funções_e_Valores._exceptions import scopus_articles_exceptions
from _Funções_e_Valores.values import quadrennium, FILE, HAS_EVENTS, FULL_PERIOD_AUTHORS, REQUEST_SCOPUS_DATA, EGRESS, SCOPUS_APIKEY
class Data():
def __init__(self, professors, egress, students, qualis_2016, qualis_2020, qualis_2016_events, qualis_2020_events):
super(Data, self).__init__()
self.professors = professors
self.egress = egress
self.students = students
self.qualis_2016 = qualis_2016
self.qualis_2020 = qualis_2020
self.qualis_2016_events = qualis_2016_events
self.qualis_2020_events = qualis_2020_events
self.exceptions = {'Nome Trabalho':[], 'Nome Evento Cadastrado':[], 'Nome Evento Canônico':[]} # For the exceptions sheet from the excel file
self.reports = {'Author':[], 'Report':[]} # Reports by author
self.authors_dict = {"Author":[], "A/E":[]} # Dictionary of authors (Professors, Students and Egress)
columns = []
for year in quadrennium:
if year not in columns:
columns.append(year)
for col in columns:
self.authors_dict[f"20{col}"] = []
self.art_prof = pd.DataFrame() # Articles by professor
self.authors_average = [] # List with the "average number of authors per article" of each professor
self.irestritos_2016 = {'Total com trava':None, 'Total sem trava':None, 'Anais com trava':None, 'Anais sem trava':None, 'Periódicos':None}
self.igerais_2016 = {'Total com trava':None, 'Total sem trava':None, 'Anais com trava':None, 'Anais sem trava':None, 'Periódicos':None}
self.authors_indicators_2016 = [] # Indicators of each professor qualis 2016
self.authors_indicators_2019 = [] # Indicators of each professor qualis 2019
self.general_indicators_2016 = [] # Indicators for all professors together qualis 2016
self.general_indicators_2019 = [] # Indicators for all professors together qualis 2019
self.authors_indicators_2016_journals = [] # Indicators of each professor qualis 2016 (Journals)
self.authors_indicators_2019_journals = [] # Indicators of each professor qualis 2019 (Journals)
self.general_indicators_2016_journals = [] # Indicators for all professors together qualis 2016 (Journals)
self.general_indicators_2019_journals = [] # Indicators for all professors together qualis 2019 (Journals)
self.authors_indicators_2016_proceedings = [] # Indicators of each professor qualis 2016 (Proceedings)
self.authors_indicators_2019_proceedings = [] # Indicators of each professor qualis 2019 (Proceedings)
self.general_indicators_2016_proceedings = [] # Indicators for all professors together qualis 2016 (Proceedings)
self.general_indicators_2019_proceedings = [] # Indicators for all professors together qualis 2019 (Proceedings)
self.journals_a1_a4_2019 = None
self.journals_a1_a4_SE_2019 = None
self.journals_a1_a4_2016 = None
self.journals_a1_a4_SE_2016 = None
self.journal_metrics_2019 = None
self.journal_metrics_2016 = None
self.proceedings_metrics_2019 = None
self.proceedings_metrics_2016 = None
def treat_data(self):
# Get the list of egress and students with their names and active-period
egress = Egress(self.egress, quadrennium)
self.egress_list = egress.get_egress_list()
students = Student(self.students, quadrennium)
self.students_list = students.get_students_list()
if HAS_EVENTS == True:
# Lowercase events
for pos, i in enumerate(self.qualis_2016_events['Nome Padrão']):
self.qualis_2016_events['Nome Padrão'][pos] = str(self.qualis_2016_events['Nome Padrão'][pos]).lower()
for pos, i in enumerate(self.qualis_2020_events['Nome Padrão']):
self.qualis_2020_events['Nome Padrão'][pos] = str(self.qualis_2020_events['Nome Padrão'][pos]).lower()
# Remove "-" from ISSN
for i in range(len(self.qualis_2016["ISSN"])):
self.qualis_2016["ISSN"][i] = self.qualis_2016["ISSN"][i].replace("-", "")
for i in range(len(self.qualis_2020["ISSN"])):
self.qualis_2020["ISSN"][i] = self.qualis_2020["ISSN"][i].replace("-", "")
def get_author_period(self, pos):
if FULL_PERIOD_AUTHORS == True:
period = {}
for year in quadrennium:
period[year] = True
else:
period = {}
for year in quadrennium:
period[year] = False
if EGRESS == True:
start = str(self.professors['Ingresso'][pos])[7:]
start = start.replace('-', '')
end = quadrennium[-1] # There's no limit
else:
if FILE == "UFSC 2017-2020":
start = str(self.professors["Início do Vínculo"][pos])[2:4]
else:
start = str(self.professors["Início do Vínculo"][pos])[8:]
end = str(self.professors["Fim do Vínculo"][pos])
if end == "-":
end = quadrennium[-1]
else:
if FILE == "UFSC 2017-2020":
end = str(self.professors["Fim do Vínculo"][pos])[2:4]
else:
end = str(self.professors["Fim do Vínculo"][pos])[8:]
if int(end) > int(quadrennium[-1]):
end = quadrennium[-1]
start_position = None
end_position = None
for pos, key in enumerate(period.keys()): # For each year of the quadrennium
if pos == 0 and int(start) < int(quadrennium[0]): # If the start year is lower than the first year of the quadrennium
start = quadrennium[0]
if key == start:
start_position = pos # The position of the start year on the quadrennium
if key == end:
end_position = pos # The position of the end year on the quadrennium
for pos, key in enumerate(period.keys()):
if int(end) >= int(quadrennium[0]):
if pos >= start_position and pos <= end_position: # The start year, the end year and the years in between are true
period[key] = True
return period
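# Illustrative example (added): with FULL_PERIOD_AUTHORS disabled and assuming quadrennium is
# the list of two-digit years ['17', '18', '19', '20'], an author whose link starts in 2018 and
# has no end date ('-') would get period = {'17': False, '18': True, '19': True, '20': True},
# i.e. only publications from the active years are counted for that professor.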
def get_authors_reports(self):
# Iterates through the professors
for pos, professor in enumerate(self.professors["Nome"]):
if str(professor) != 'nan':
professor = str(professor)
period = self.get_author_period(pos) # Get the period of valid publications
author = Author(professor, period, self.qualis_2016, self.qualis_2020, self.qualis_2016_events, self.qualis_2020_events, self.professors, self.authors_dict["Author"])
# print(professor)
# print(pd.DataFrame(author.info))
self.authors_dict["Author"] = author.authors_list # Updates the authors list
self.reports['Author'].append(professor) # Adds the professor to the list of reports
self.reports['Report'].append(pd.DataFrame(author.info)) # Adds the professor's report to the list of reports
self.authors_average.append(author.get_authors_average()) # Adds the "average number of authors per article" to the list of averages
for title in author.exceptions['Nome Trabalho']:
self.exceptions['Nome Trabalho'].append(title)
for event_registered in author.exceptions['Nome Evento Cadastrado']:
self.exceptions['Nome Evento Cadastrado'].append(event_registered)
for canon_event in author.exceptions['Nome Evento Canônico']:
self.exceptions['Nome Evento Canônico'].append(canon_event)
self.exceptions = pd.DataFrame(self.exceptions)
def treat_names(self): # Looks for matches between names written in different ways and replaces them with the canonical name
egress_names = []
for egress in self.egress_list:
egress_names.append(treat_exceptions(egress.name.strip()))
students_names = []
for student in self.students_list:
students_names.append(treat_exceptions(student.name.strip()))
for pos, report in enumerate(self.reports["Report"]):
# df = pd.DataFrame(report)
# for index, row in df.iterrows():
for index, row in report.iterrows():
for column in row.index:
if "Autor" in str(column): # Goes through the authors columns
if self.reports["Report"][pos][column][index] != " ":
_, self.reports["Report"][pos].loc[index, column] = search_authors_list(self.authors_dict["Author"], str(self.reports["Report"][pos][column][index]))
_, self.reports["Report"][pos].loc[index, column] = search_authors_list(egress_names, str(self.reports["Report"][pos][column][index]))
_, self.reports["Report"][pos].loc[index, column] = search_authors_list(students_names, str(self.reports["Report"][pos][column][index]))
def get_art_prof(self):
for pos, report in enumerate(self.reports["Report"]):
name_column = [self.reports["Author"][pos] for i in range(len(report))] # Generates a column with the name of the author for each article
report_copy = report.copy() # A copy of the report
report_copy.insert(loc=0, column='Nome', value=name_column) # Adds the name_column
if pos == 0:
self.art_prof = report_copy
else:
self.art_prof = pd.concat([self.art_prof, report_copy], ignore_index=True, sort=False) # Puts the reports together, in one dataframe
# Replace "nan" values with " "
for col in self.art_prof.columns:
if "Autor" in col:
for pos, i in enumerate(self.art_prof[col]):
if str(i) == "NaN" or str(i) == "nan":
self.art_prof.loc[pos, col] = " "
def update_authors_dict(self):
egress_names = []
for egress in self.egress_list:
egress_names.append(treat_exceptions(egress.name.strip())) # Gets the egress' name
students_names = []
for student in self.students_list:
students_names.append(treat_exceptions(student.name.strip())) # Gets the student's name
columns = []
for year in quadrennium:
if year not in columns:
columns.append(year)
# Looks for egress or students and marks them with a X in the "A/E" column
for author in self.authors_dict["Author"]:
if author in egress_names or author in students_names:
self.authors_dict["A/E"].append("X")
else:
self.authors_dict["A/E"].append("")
for col in columns:
self.authors_dict[f"20{col}"].append("")
result_df = self.art_prof.apply(lambda x: x.astype(str).str.lower()).drop_duplicates(subset="Título")
publications = self.art_prof.loc[result_df.index]
for index, row in publications.iterrows():
for column in row.index:
if "Autor" in str(column): # Goes through the authors columns
for pos, author in enumerate(self.authors_dict["Author"]):
if author == row[column]:
year = row["Ano"]
if "." in str(year):
year = str(year).replace(".0", "")
year = int(year)
self.authors_dict[str(year)][pos] = "X"
def get_indicators(self):
all_publications = self.artppg.copy()
indicators_2016 = Indicators(self.egress_list, self.students_list, all_publications, "CC 2016", general=True)
gen_ind_2016, gen_ind_2016_journals, gen_ind_2016_proceedings = indicators_2016.get_indicators_2016()
self.general_indicators_2016 = gen_ind_2016
self.general_indicators_2016_journals = gen_ind_2016_journals
self.general_indicators_2016_proceedings = gen_ind_2016_proceedings
self.irestritos_2016 = indicators_2016.irestritos
self.igerais_2016 = indicators_2016.igerais
indicators_2019 = Indicators(self.egress_list, self.students_list, all_publications, "2019", general=True)
gen_ind_2019, gen_ind_2019_journals, gen_ind_2019_proceedings = indicators_2019.get_indicators_2019()
self.general_indicators_2019 = gen_ind_2019
self.general_indicators_2019_journals = gen_ind_2019_journals
self.general_indicators_2019_proceedings = gen_ind_2019_proceedings
self.irestritos_2019 = indicators_2019.irestritos
self.igerais_2019 = indicators_2019.igerais
for report in self.reports["Report"]:
indicators_2016 = Indicators(self.egress_list, self.students_list, report, "CC 2016")
authors_ind_2016, authors_ind_2016_journals, authors_ind_2016_proceedings = indicators_2016.get_indicators_2016()
self.authors_indicators_2016.append(authors_ind_2016)
self.authors_indicators_2016_journals.append(authors_ind_2016_journals)
self.authors_indicators_2016_proceedings.append(authors_ind_2016_proceedings)
indicators_2019 = Indicators(self.egress_list, self.students_list, report, "2019")
authors_ind_2019, authors_ind_2019_journals, authors_ind_2019_proceedings = indicators_2019.get_indicators_2019()
self.authors_indicators_2019.append(authors_ind_2019)
self.authors_indicators_2019_journals.append(authors_ind_2019_journals)
self.authors_indicators_2019_proceedings.append(authors_ind_2019_proceedings)
def analyze_journal_classifications(self, qualis_year):
journals_a1_a4_list = [] # Journals A1-A4
journals_a1_a4_SE = [] # Journals A1-A4 with students and/or egress
journals_a1_b1_list = [] # Journals A1-B1
journals_a1_b1_SE = [] # Journals A1-B1 with students and/or egress
for pos, report in enumerate(self.reports["Report"]):
# Separates by journal classifications
journals = report.loc[report["Tipo"] == "Periódico"] # All the publications in journals
journals_a1 = journals.loc[journals[f"Qualis {qualis_year}"] == "A1"]
journals_a2 = journals.loc[journals[f"Qualis {qualis_year}"] == "A2"]
if qualis_year == "2019":
journals_a3 = journals.loc[journals[f"Qualis {qualis_year}"] == "A3"]
journals_a4 = journals.loc[journals[f"Qualis {qualis_year}"] == "A4"]
journals_a1_a4 = pd.concat([journals_a1, journals_a2, journals_a3, journals_a4], ignore_index=True, sort=False)
# Calculates the amount of articles A1-A4 with and without students/egress
amount_journals_a1_a4 = len(journals_a1_a4.index)
journals_a1_a4_list.append(amount_journals_a1_a4)
indicators = Indicators(self.egress_list, self.students_list, journals_a1_a4, qualis_year)
amount_journals_a1_a4_SE = indicators.get_SE(journals_a1_a4)
journals_a1_a4_SE.append(amount_journals_a1_a4_SE)
elif qualis_year == "CC 2016":
journals_b1 = journals.loc[journals[f"Qualis {qualis_year}"] == "B1"]
journals_a1_b1 = pd.concat([journals_a1, journals_a2, journals_b1], ignore_index=True, sort=False)
# Calculates the amount of articles A1-B1 with and without students/egress
amount_journals_a1_b1 = len(journals_a1_b1.index)
journals_a1_b1_list.append(amount_journals_a1_b1)
indicators = Indicators(self.egress_list, self.students_list, journals_a1_b1, qualis_year)
amount_journals_a1_b1_SE = indicators.get_SE(journals_a1_b1)
journals_a1_b1_SE.append(amount_journals_a1_b1_SE)
if qualis_year == "2019":
return (journals_a1_a4_list, journals_a1_a4_SE)
elif qualis_year == "CC 2016":
return (journals_a1_b1_list, journals_a1_b1_SE)
def analyze_journals(self):
all_publications = self.artppg.copy()
self.journals = all_publications.copy().loc[all_publications["Tipo"] == "Periódico"] # All the publications in journals
self.journals.loc[:, 'Quantidade'] = self.journals["Nome de Publicação"].map(self.journals["Nome de Publicação"].value_counts()) # Calculates the number of times the journal appears and add that number to a column
columns = ["Nome de Publicação", "ISSN/SIGLA", "Qualis CC 2016", "Qualis 2019", "Scopus 2019", "Quantidade"] # The columns we're gonna use
drop_columns = []
for column in self.journals.columns:
if column not in columns:
drop_columns.append(column)
self.journals = self.journals.drop(columns=drop_columns)
self.journals = self.journals.rename(columns={"ISSN/SIGLA": "ISSN"})
self.journals = self.journals.drop_duplicates(subset="ISSN") # Drop all the duplicated journals
def analyze_journal_metrics(self, qualis_year):
journal_metrics = pd.DataFrame(columns=[f"Métrica {qualis_year}", "Qtd.", "Qtd. %"])
if qualis_year == "2019":
journal_metrics[f"Métrica {qualis_year}"] = ["Quantidade de periódicos diferentes", "Quantidade de periódicos A1-A4", "Quantidade de periódicos B1-B4", "Quantidade de periódicos Não Qualis"]
elif qualis_year == "CC 2016":
journal_metrics[f"Métrica {qualis_year}"] = ["Quantidade de periódicos diferentes", "Quantidade de periódicos A1-B1", "Quantidade de periódicos B2-B5", "Quantidade de periódicos Não Qualis"]
amount = []
amount_perc = []
amount.append(len(self.journals.index))
journals_a1 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "A1"]
journals_a2 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "A2"]
if qualis_year == "2019":
journals_a3 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "A3"]
journals_a4 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "A4"]
journals_upperstrata = pd.concat([journals_a1, journals_a2, journals_a3, journals_a4], ignore_index=True, sort=False)
amount.append(len(journals_upperstrata.index))
journals_b1 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B1"]
journals_b2 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B2"]
journals_b3 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B3"]
journals_b4 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B4"]
journals_lowerstrata = pd.concat([journals_b1, journals_b2, journals_b3, journals_b4], ignore_index=True, sort=False)
amount.append(len(journals_lowerstrata.index))
elif qualis_year == "CC 2016":
journals_b1 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B1"]
journals_upperstrata = pd.concat([journals_a1, journals_a2, journals_b1], ignore_index=True, sort=False)
amount.append(len(journals_upperstrata.index))
journals_b2 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B2"]
journals_b3 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B3"]
journals_b4 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B4"]
journals_b5 = self.journals.loc[self.journals[f"Qualis {qualis_year}"] == "B5"]
journals_lowerstrata = pd.concat([journals_b2, journals_b3, journals_b4, journals_b5], ignore_index=True, sort=False)
amount.append(len(journals_lowerstrata.index))
others = self.journals.loc[((self.journals[f"Qualis {qualis_year}"] == "-") | (self.journals[f"Qualis {qualis_year}"] == "NP") | (self.journals[f"Qualis {qualis_year}"] == "C"))]
amount.append(len(others.index))
journal_metrics["Qtd."] = amount
for i in journal_metrics["Qtd."]:
amount_perc.append(f"{round(100/journal_metrics['Qtd.'][0] * i, 1)}%")
journal_metrics["Qtd. %"] = amount_perc
return journal_metrics
def analyze_proceedings(self):
all_publications = self.artppg.copy()
self.proceedings = all_publications.loc[all_publications["Tipo"] == "Anais"]
self.proceedings.loc[:, 'Quantidade'] = self.proceedings["Nome de Publicação"].map(self.proceedings["Nome de Publicação"].value_counts())
columns = ["Nome de Publicação", "ISSN/SIGLA", "Qualis CC 2016", "Qualis 2019", "Scopus 2019", "Quantidade"]
drop_columns = []
for column in self.proceedings.columns:
if column not in columns:
drop_columns.append(column)
self.proceedings = self.proceedings.drop(columns=drop_columns)
self.proceedings = self.proceedings.rename(columns={"ISSN/SIGLA": "SIGLA"})
self.proceedings = self.proceedings.drop_duplicates(subset="SIGLA")
def analyze_proceedings_metrics(self, qualis_year):
proceedings_metrics = pd.DataFrame(columns=[f"Métrica {qualis_year}", "Qtd.", "Qtd. %"])
if qualis_year == "2019":
proceedings_metrics[f"Métrica {qualis_year}"] = ["Quantidade de eventos diferentes", "Quantidade de eventos A1-A4", "Quantidade de eventos B1-B4", "Quantidade de eventos Não Qualis"]
elif qualis_year == "CC 2016":
proceedings_metrics[f"Métrica {qualis_year}"] = ["Quantidade de eventos diferentes", "Quantidade de eventos A1-B1", "Quantidade de eventos B2-B5", "Quantidade de eventos Não Qualis"]
amount = []
amount_perc = []
amount.append(len(self.proceedings.index))
proceedings_a1 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "A1"]
proceedings_a2 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "A2"]
if qualis_year == "2019":
proceedings_a3 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "A3"]
proceedings_a4 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "A4"]
proceedings_a1_a4 = pd.concat([proceedings_a1, proceedings_a2, proceedings_a3, proceedings_a4], ignore_index=True, sort=False)
amount.append(len(proceedings_a1_a4.index))
proceedings_b1 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "B1"]
proceedings_b2 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "B2"]
proceedings_b3 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "B3"]
proceedings_b4 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "B4"]
proceedings_lowerstrata = pd.concat([proceedings_b1, proceedings_b2, proceedings_b3, proceedings_b4], ignore_index=True, sort=False)
amount.append(len(proceedings_lowerstrata.index))
elif qualis_year == "CC 2016":
proceedings_b1 = self.proceedings.loc[self.proceedings[f"Qualis {qualis_year}"] == "B1"]
proceedings_a1_a4 = | pd.concat([proceedings_a1, proceedings_a2, proceedings_b1], ignore_index=True, sort=False) | pandas.concat |
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
import requests
from selenium import webdriver
from time import sleep
def balance(ticker, wd_path):
'''
Uses selenium webdriver to open the Yahoo Finance
balance sheet page and expand all possibles rows.
Then, download the newest information available.
Args:
-------
ticker (str): must use the same ticker as Yahoo Finance
wd_path (str): absolute path to webdriver executable
Returns:
-------
pd.DataFrame: Balance sheets data
'''
# Web page
url = 'https://finance.yahoo.com/quote/{}/balance-sheet?p={}'.format(ticker, ticker)
# Open webdriver
browser = webdriver.Chrome(executable_path=wd_path)
# Open page
browser.get(url)
sleep(2)
# Expand all possible rows
cols = []
# Try clicking in everything possible 5 times
for i in range(5):
rows = browser.find_elements_by_css_selector('div[data-test="fin-row"]')
for r in rows:
col_name = r.find_element_by_css_selector('span')
# Dont click the same col twice
if col_name.text not in cols:
cols.append(col_name.text)
# Cant click, not a problem just keep clicking
try:
press = r.find_element_by_css_selector('svg')
press.click()
# print('CLICKED ON: ' + col_name.text)
sleep(1)
except:
# print('NOT CLICKED IN: ' + col_name.text)
pass
# Now we finally take the data we want
raw_dict = {}
rows = browser.find_elements_by_css_selector('div[data-test="fin-row"]')
for r in rows:
# Take the data
info = r.find_element_by_css_selector('div[data-test="fin-col"] > span')
# Column name
col_name = r.find_element_by_css_selector('div[title] > span')
raw_dict[col_name.text] = [info.text]
# Close webdrive
browser.quit()
# Convert to dict to df and values to numbers
bs = pd.DataFrame.from_dict(raw_dict)
bs = bs.replace(',', '', regex=True)
bs = bs.astype('double')
# All values are in thousand
bs = bs * 1000
return bs
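# Example usage (added; the ticker, driver path and row label are placeholders - the row labels
# depend on what Yahoo Finance currently renders):
# bs = balance('AAPL', r'C:\tools\chromedriver.exe')
# bs['Total Assets'] # values are scaled from thousands to units by the function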
def balance_allyears(ticker, wd_path):
'''
Uses selenium webdriver to open the Yahoo Finance
balance sheet page and expand all possibles rows.
Then, download fundamental information for all years
available.
Args:
-------
ticker (str): must use the same ticker as yahoo finance
wd_path (str): absolute path to webdriver executable
Returns:
-------
pd.DataFrame: Balance sheets data (each row is a year)
'''
# Web page
url = 'https://finance.yahoo.com/quote/{}/balance-sheet?p={}'.format(ticker, ticker)
# Open webdriver
browser = webdriver.Chrome(executable_path=wd_path)
# Open page
browser.get(url)
sleep(2)
# Expand all possible rows
cols = []
# Try clicking in everything possible 5 times
for i in range(5):
rows = browser.find_elements_by_css_selector('div[data-test="fin-row"]')
for r in rows:
col_name = r.find_element_by_css_selector('span')
# Dont click the same col twice
if col_name.text not in cols:
cols.append(col_name.text)
# Cant click, not a problem just keep clicking
try:
press = r.find_element_by_css_selector('svg')
press.click()
# print('CLICKED ON: ' + col_name.text)
sleep(1)
except:
# print('NOT CLICKED IN: ' + col_name.text)
pass
# Now we finally take the data we want
raw_dict = {}
rows = browser.find_elements_by_css_selector('div[data-test="fin-row"]')
for r in rows:
# Take the data
info = r.find_elements_by_css_selector('div[data-test="fin-col"]')
# Column name
col_name = r.find_element_by_css_selector('div[title] > span')
info_l = []
for inf in info[:4]:
info_l.append(inf.text)
raw_dict[col_name.text] = info_l
# Close the webdriver
browser.quit()
# Convert to dict to df and values to numbers
bs = | pd.DataFrame.from_dict(raw_dict) | pandas.DataFrame.from_dict |
from typing import List, Dict
import pandas as pd
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.column_names_enum import ColumnNamesEnum
from reinvent_scoring.scoring.diversity_filters.curriculum_learning.memory_record_dto import MemoryRecordDTO
from reinvent_scoring.scoring.score_summary import ComponentSummary
from reinvent_scoring.scoring.enums.scoring_function_component_enum import ScoringFunctionComponentNameEnum
class DiversityFilterMemory:
def __init__(self):
self._sf_component_name = ScoringFunctionComponentNameEnum()
self._column_name = ColumnNamesEnum()
df_dict = {self._column_name.STEP: [], self._column_name.SCAFFOLD: [], self._column_name.SMILES: [],
self._column_name.METADATA: []}
self._memory_dataframe = pd.DataFrame(df_dict)
def update(self, dto: MemoryRecordDTO):
component_scores = {c.parameters.name: float(c.total_score[dto.id]) for c in dto.components}
component_scores = self._include_raw_score(dto.id, component_scores, dto.components)
component_scores[self._sf_component_name.TOTAL_SCORE] = float(dto.score)
if not self.smiles_exists(dto.smile): self._add_to_memory_dataframe(dto, component_scores)
def _add_to_memory_dataframe(self, dto: MemoryRecordDTO, component_scores: Dict):
data = []
headers = []
for name, score in component_scores.items():
headers.append(name)
data.append(score)
headers.append(self._column_name.STEP)
data.append(dto.step)
headers.append(self._column_name.SCAFFOLD)
data.append(dto.scaffold)
headers.append(self._column_name.SMILES)
data.append(dto.smile)
headers.append(self._column_name.METADATA)
data.append(dto.loggable_data)
new_data = | pd.DataFrame([data], columns=headers) | pandas.DataFrame |
#!/usr/bin/python3
"""
Benchmark applications.
"""
import sys
import pandas as pd
import subprocess
import time
from subprocess import PIPE
def start_iperf(name="uec", mode="c", ip="172.17.0.2", t="10", i="1", msg="", file=None):
_cmd = "iperf3 -R -{} {} -i {} -t {}".format(mode, ip, i, t)
if mode == "s":
_cmd = "iperf3 -{} -i {} -t {}".format(mode, i, t)
_exec = "sudo docker exec -it {} {}"
if file:
p = subprocess.Popen(";".join([
_exec.format(name, _cmd),
"" if msg == "" else "echo {}".format(msg),
]), shell=True, stdout=file, stderr=file)
else:
p = subprocess.Popen(";".join([
_exec.format(name, _cmd),
"" if msg == "" else "echo {}".format(msg),
]), shell=True, stdout=PIPE, stderr=PIPE)
output = p.stdout.read()
print(str(output, 'utf-8'))
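# Illustrative note (added): with the defaults above, the command run inside the "uec"
# container is roughly
# sudo docker exec -it uec iperf3 -R -c 172.17.0.2 -i 1 -t 10
# while mode="s" drops the -R/-c pair and runs a plain "iperf3 -s -i 1 -t 10" server instead.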
def parse_iperf(filename="", t="15", i="0.01"):
"""
Parse an iperf text file output and insert into CSV.
"""
if (filename):
with open(filename + ".txt", 'r') as fp:
line = fp.readline()
with open(filename + ".csv", 'w') as empty_csv:
pass
prevline = ""
cnt = 1
iperf_cnt = 0
line_cnt = 0
collect_data = False
col = []
while line:
if (("------" in line or "- - - - " in line or "Connecting to" in line) and collect_data):
try:
csv_input = | pd.read_csv(filename + ".csv") | pandas.read_csv |
import json
import os
import pandas as pd
from running_modes.configurations.general_configuration_envelope import GeneralConfigurationEnvelope
from running_modes.scoring.logging.base_scoring_logger import BaseScoringLogger
from scoring.score_summary import FinalSummary
from utils.enums.scoring_runner_enum import ScoringRunnerEnum
class LocalScoringLogger(BaseScoringLogger):
def __init__(self, configuration: GeneralConfigurationEnvelope):
super().__init__(configuration)
self._scoring_runner_enum = ScoringRunnerEnum()
def log_message(self, message: str):
self._logger.info(message)
def log_out_input_configuration(self):
file = os.path.join(self._log_config.logging_path, "input.json")
jsonstr = json.dumps(self._configuration, default=lambda x: x.__dict__, sort_keys=True, indent=4,
separators=(',', ': '))
with open(file, 'w') as f:
f.write(jsonstr)
def log_results(self, score_summary: FinalSummary):
output_file = os.path.join(self._log_config.logging_path, "scored_smiles.csv")
component_names = [c.name for c in score_summary.profile]
data_list = self._convolute_score_summary(score_summary)
data_df = self._construct_df_from_list(data=data_list, component_names=component_names)
data_df.to_csv(path_or_buf=output_file, sep=',', header=True, index=False)
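# Note (added): the resulting scored_smiles.csv therefore has one row per input SMILES with
# the SMILES and TOTAL_SCORE columns, one column per scoring component, and a VALID flag;
# invalid SMILES keep a total score of '0' and a valid flag of '0' (see _convolute_score_summary below).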
@staticmethod
def _convolute_score_summary(score_summary: FinalSummary) -> []:
#TODO: seems like this can benefit from some refactoring
"""iterate over all smiles and extract scores, components and validity for each"""
smiles = score_summary.scored_smiles
component_scores = [c.score for c in score_summary.profile]
data = []
for i_smile in range(len(smiles)):
score = '0'
valid = '0'
if i_smile in score_summary.valid_idxs:
score = str(score_summary.total_score[i_smile])
valid = '1'
row = [smiles[i_smile], score]
for component in component_scores:
row.append(component[i_smile])
row.append(valid)
data.append(row)
return data
def _construct_df_from_list(self, data: list, component_names: list) -> pd.DataFrame:
column_names = [self._scoring_runner_enum.SMILES, self._scoring_runner_enum.TOTAL_SCORE]
column_names.extend(component_names)
column_names.append(self._scoring_runner_enum.VALID)
dataframe = | pd.DataFrame(data, columns=column_names, dtype=str) | pandas.DataFrame |
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import matplotlib.pyplot as plt
import numpy as np
import scipy.integrate
from matplotlib.colors import LogNorm
from matplotlib.colors import TwoSlopeNorm
from scipy.interpolate import interp1d
try:
import pandas as pd
except ImportError:
pd = None
import pykooh
from .motion import TimeSeriesMotion, WaveField, GRAVITY
def plot_amplification_evolv(
calc,
metric="accel_tf",
depths=None,
freqs=None,
normalized=False,
wave_field_out="within",
diverging_cmap=True,
include_vs_profile=False,
ax=None,
**kwds,
):
# Default plotting kwds. Combine both sets of plot keywords and prefer the values provided by the caller
kwds = {
"cmap": "RdBu" if diverging_cmap else "magma_r",
"shading": "gouraud",
"norm": TwoSlopeNorm(vmin=0, vcenter=1) if diverging_cmap else LogNorm(),
} | kwds
if freqs is None:
freqs = np.logspace(-1, 2, num=301)
osc_damping = 0.05 if "osc_damping" not in kwds else kwds["osc_damping"]
ln_freqs = np.log(freqs)
ln_freqs_mot = np.log(calc.motion.freqs)
def get_amp(metric, depth):
loc_output = calc.profile.location(wave_field_out, depth=depth)
if metric == "accel_tf":
y = np.abs(calc.calc_accel_tf(calc.loc_input, loc_output))
# Interpolate the specific frequencies
y = np.interp(ln_freqs, ln_freqs_mot, y)
elif metric == "site_amp":
if get_amp.in_ars is None:
get_amp.in_ars = calc.motion.calc_osc_accels(freqs, osc_damping)
out_ars = calc.motion.calc_osc_accels(
freqs, osc_damping, calc.calc_accel_tf(calc.loc_input, loc_output)
)
y = out_ars / get_amp.in_ars
else:
raise NotImplementedError
return y
# Initialize static variable
get_amp.in_ars = None
if depths is None:
depths = np.linspace(0, calc.profile[-1].depth)
if ax is None:
fig, ax = plt.subplots()
amps = np.array([get_amp(metric, d) for d in depths])
if normalized:
amps /= amps[-1, :]
cf = ax.pcolormesh(freqs, depths, amps, **kwds)
cb = plt.colorbar(cf, ax=ax)
cb.set_label("|TF|" if metric == "accel_tf" else "Site Ampl.")
ax.set(
xlabel="Frequency (Hz)",
xscale="log",
ylabel="Depth (m)",
yscale="linear",
ylim=(depths[0], 0),
)
return ax
class OutputCollection(collections.abc.Collection):
def __init__(self, outputs):
super().__init__()
self.outputs = outputs
def __iter__(self):
return iter(self.outputs)
def __contains__(self, value):
return value in self.outputs
def __len__(self):
return len(self.outputs)
def __getitem__(self, key):
return self.outputs[key]
def __call__(self, calc, name=None):
# Save results
for o in self:
o(calc, name=name)
def reset(self):
for o in self:
o.reset()
def append_arrays(many, single):
"""Append an array to another padding with NaNs for constant length.
Parameters
----------
many : array_like of rank (j, k)
values appended to a copy of this array. This may be a 1-D or 2-D
array.
single : array_like of rank l
values to append. This should be a 1-D array.
Returns
-------
append : :class:`numpy.ndarray`
2-D array with rank (j + 1, max(k, l)) with missing values padded
with :class:`numpy.nan`
"""
assert np.ndim(single) == 1
# Check if the values need to be padded to for equal length
diff = single.shape[0] - many.shape[0]
if diff < 0:
single = np.pad(single, (0, -diff), "constant", constant_values=np.nan)
elif diff > 0:
# Need different padding based on if many is 1d or 2d.
padding = ((0, diff), (0, 0)) if len(many.shape) > 1 else (0, diff)
many = np.pad(many, padding, "constant", constant_values=np.nan)
else:
# No padding needed
pass
return np.c_[many, single]
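# Example (added for clarity; follows directly from the padding rules in the docstring):
# append_arrays(np.array([1., 2., 3.]), np.array([4., 5.]))
# -> array([[ 1., 4.],
# [ 2., 5.],
# [ 3., nan]])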
class Output(object):
_const_ref = False
xscale = "log"
yscale = "log"
drawstyle = "default"
def __init__(self, refs=None):
self._refs = np.asarray([] if refs is None else refs)
self._values = None
self._names = []
def __call__(self, calc, name=None):
if name is None:
if self.values is None:
i = 1
elif len(self.values.shape) == 1:
i = 2
else:
i = self.values.shape[1] + 1
name = "r%d" % i
self._names.append(name)
@property
def refs(self):
return self._refs
@property
def values(self):
return self._values
@property
def names(self):
return self._names
def reset(self):
self._values = None
self._names = []
if not self._const_ref:
self._refs = np.array([])
def iter_results(self):
shared_ref = len(self.refs.shape) == 1
for i, name in enumerate(self.names):
refs = self.refs if shared_ref else self.refs[:, i]
values = self.values if len(self.values.shape) == 1 else self.values[:, i]
yield name, refs, values
def _add_refs(self, refs):
refs = np.asarray(refs)
if len(self._refs) == 0:
self._refs = np.array(refs)
else:
self._refs = append_arrays(self._refs, refs)
def _add_values(self, values):
values = np.asarray(values)
if self._values is None:
self._values = values
else:
self._values = append_arrays(self._values, values)
def calc_stats(self, as_dataframe=False):
ln_values = np.log(self.values)
median = np.exp(np.nanmean(ln_values, axis=1))
ln_std = np.nanstd(ln_values, axis=1)
stats = {"ref": self.refs, "median": median, "ln_std": ln_std}
if as_dataframe and pd:
stats = pd.DataFrame(stats).set_index("ref")
stats.index.name = self.ref_name
return stats
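# Note (added): the statistics above are computed in natural-log space, so `median` is the
# geometric mean exp(mean(ln x)) across realisations and `ln_std` is the standard deviation
# of ln(x), the usual log-normal dispersion measure.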
def to_dataframe(self):
if not pd:
raise RuntimeError("Install `pandas` library.")
if isinstance(self.names[0], tuple):
columns = | pd.MultiIndex.from_tuples(self.names) | pandas.MultiIndex.from_tuples |
__all__ = ["ExtremaCalculator"]
import pdb # noqa: F401
import pandas as pd
import matplotlib as mpl
import numpy as np
import solarwindpy as swp
class ExtremaCalculator(object):
r"""Calculate the minima and maxima for a activity index, defining an
Indicator Cycle starting at Minima N and ending at Minimum N+1.
Properties
----------
data: pd.Series
Activity index, possibly smoothed by `window`.
raw: pd.Series
Activity index as passed, not smoothed.
name: str
Key identifying index type.
window: scalar, None
If not None, the number of days for applying the rolling window.
threshold: pd.Series
The threshold for determining Maxima and Minima in a `pd.Series` for each
point in the `data`.
extrema_finders: namedtuple
Contains times when `data` crosses `threshold` (changes) and DateTime information
binned by the ranges for identifying extrema (`cut`).
extrema: pd.Series
Extrema times identified by extrema type.
formatted_extrema: pd.DataFrame
Extrema formatted as the SSN extrema:
========== ============ ============
Interval Min Max
========== ============ ============
-1 <DateTime> <DateTime>
0 <DateTime> <DateTime>
1 <DateTime> <DateTime>
...
N <DateTime> <DateTime>
========== ============ ============
Methods
-------
set_name, set_data, set_threshold, find_extrema, make_plot
"""
def __init__(self, name, activity_index, threshold=None, window=600):
r"""Parameters
----------
name: str
key used to select activity indicator.
activity_index: pd.Series
Data as measured for the index.
threshold: scalar, FunctionType, None
If scalar, the threshold for selecting data for finding Maxima/Minima.
If FunctionType, called on `activity_index` (`self.data`) to calculate the threshold.
If None, pull scalar from an internal dictionary. If not present in dictionary,
calculate with `np.nanmedian`.
window: scalar
The number of days to apply for a rolling window mean.
"""
self.set_name(name)
self.set_data(activity_index, window)
self.set_threshold(threshold)
self.find_threshold_crossings()
self.find_extrema()
@property
def data(self):
return self._data
@property
def raw(self):
return self._raw
@property
def name(self):
r"""Activity index name.
"""
return self._name
@property
def window(self):
return self._window
@property
def threshold(self):
return self._threshold
@property
def extrema_finders(self):
return self._extrema_finders
@property
def extrema(self):
return self._extrema
@property
def threshold_crossings(self):
return self._threshold_crossings
@property
def data_in_extrema_finding_intervals(self):
return self._data_in_extrema_finding_intervals
@property
def formatted_extrema(self):
return self._formatted_extrema
def set_name(self, new):
if new in ("delk2", "delwb", "k2vk3", "viored", "delk1"):
raise ValueError(
"Unable to determine threshold. You need to check this one."
)
self._name = str(new)
def set_data(self, index, window):
if self.name in ("delk1", "delk2", "delwb", "emdx", "k2vk3", "k3", "viored"):
# We don't trust CaK before then.
index = index.loc["1977-01-01":]
rolled = index
if window is not None:
rolled = index.rolling("%sd" % window).mean()
rolled.index = rolled.index - pd.to_timedelta("%sd" % (window / 2.0))
self._raw = index
self._data = rolled
self._window = window
def _format_axis(self, ax):
left, _ = ax.get_xlim()
left = pd.to_datetime(
"{}-01-01".format(pd.to_datetime(mpl.dates.num2date(left)).year - 1)
)
ax.set_xlim(
left=mpl.dates.date2num(left),
right=mpl.dates.date2num(pd.to_datetime("2020-01-02")),
)
ax.xaxis.set_major_formatter(mpl.dates.DateFormatter("%Y"))
ax.xaxis.set_major_locator(mpl.dates.YearLocator(2))
ax.figure.autofmt_xdate()
hdl, lbl = ax.get_legend_handles_labels()
hdl = np.asarray(hdl)
lbl = np.asarray(lbl)
tk = lbl != "indicator"
ax.legend(hdl[tk], lbl[tk], loc=0, ncol=1, framealpha=0)
ax.set_ylabel(self.name)
ax.set_xlabel("Year")
def _plot_data(self, ax):
x = mpl.dates.date2num(self.data.index)
y = self.data.values
ax.plot(x, y, color="C0", label="Rolled")
# x = mpl.dates.date2num(self.raw.index)
# y = self.raw.values
# ax.plot(x, y, color="C2", label="Raw")
def _plot_threshold(self, ax):
x = mpl.dates.date2num(self.data.index)
y = self.threshold
ax.plot(x, y, color="C1", label="{:.5f}".format(self.threshold.unique()[0]))
def _plot_extrema_ranges(self, ax):
joint = pd.concat(
{"cut": self.data_in_extrema_finding_intervals, "indicator": self.data},
axis=1,
)
gb = joint.groupby("cut")
ngroup = 0
for k, v in gb:
color = "darkorange" if ngroup % 2 else "fuchsia"
v.plot(ax=ax, color=color, ls="--", label=None)
ngroup += 1
ax.legend_.set_visible(False)
def _plot_threshold_crossings(self, ax):
crossings = self.threshold_crossings
crossings.plot(ax=ax, color="cyan", marker="P", ls="none", label="Changes")
ax.legend()
def _plot_extrema(self, ax):
maxima = self.data.loc[self.extrema.index].loc[self.extrema == "Max"]
minima = self.data.loc[self.extrema.index].loc[self.extrema == "Min"]
for ex, c, lbl in zip((maxima, minima), ("red", "limegreen"), ("Max", "Min")):
x = mpl.dates.date2num(ex.index)
y = ex
ax.plot(x, y, color=c, label=lbl, ls="none", marker="*")
def set_threshold(self, threshold):
from numbers import Number
from types import FunctionType
automatic = {
"LymanAlpha": 4.1,
"delk1": 0.62,
"emdx": 0.091,
"f107": 110.0,
"k3": 0.066,
"mg_index": 0.27,
"sd_70": 13.0,
"sl_70": 2.0, # Actually log10(sl_70)
"viored": 1.29,
}
if threshold is None:
threshold = automatic.get(self.name, np.nanmedian)
if isinstance(threshold, FunctionType):
threshold = threshold(self.data)
elif isinstance(threshold, Number):
pass
threshold = pd.Series(threshold, index=self.data.index)
self._threshold = threshold
@staticmethod
def _find_extrema(threshold, cut, data):
joint = pd.concat({"cut": cut, "indicator": data}, axis=1)
gb = joint.groupby("cut")
thresh = threshold.unique()
assert thresh.size == 1
thresh = thresh[0]
maxima = {}
minima = {}
for k, v in gb:
# Lots of logic to ensure we only have one minima or one maxima
v = v.indicator
vclean = v.dropna()
if not vclean.size:
# No valid data in this range
continue
is_max = (vclean > thresh).value_counts()
if is_max.size > 1:
is_max = is_max.replace(1, np.nan).dropna()
assert is_max.size == 1
is_max = is_max.index[0]
if is_max:
maxima[k] = vclean.idxmax()
else:
minima[k] = vclean.idxmin()
maxima = pd.Series("Max", index=maxima.values())
minima = pd.Series("Min", index=minima.values())
return maxima, minima
def _validate_extrema(self, maxima, minima):
name = self.name
if name == "LymanAlpha":
maxima = maxima.iloc[1:]
elif name == "delk1":
minima = minima.iloc[1:-1]
# elif name == "emdx":
# minima = minima.iloc[2:]
# maxima = maxima.iloc[:-1]
elif name == "f107":
minima = minima.iloc[:-1]
maxima = maxima.iloc[1:]
elif name == "mg_index":
maxima = maxima.iloc[1:]
elif name == "sd_70":
minima = minima.iloc[:-1]
elif name == "sl_70":
minima = minima.iloc[1:]
elif name == "viored":
minima = minima.iloc[1:-1]
minimum_seperation = pd.to_timedelta("1000d")
tk_max = maxima.index.to_series().diff() > minimum_seperation
tk_min = minima.index.to_series().diff() > minimum_seperation
# 0th entry diff is NaT -> False by default.
tk_max.iloc[0] = True
tk_min.iloc[0] = True
maxima = maxima.loc[tk_max]
minima = minima.loc[tk_min]
return maxima, minima
def find_threshold_crossings(self):
data = self.data
threshold = self.threshold
high = data > threshold
low = data < threshold
dhigh = high.astype(int).diff() != 0
dlow = low.astype(int).diff() != 0
deltas = dlow | dhigh
crossings = data.where(deltas).dropna()
self._threshold_crossings = crossings
return crossings
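# Note (added): a sample is kept as a "crossing" whenever the rolled index switches from
# above the threshold to below it or vice versa. For example, with a threshold of 110 the
# f107 values [95, 105, 120, 115, 90] would flag the samples 120 and 90 (plus the very first
# sample, whose diff is NaN); those crossing times become the bin edges used below to cut
# the data into alternating maximum-finding and minimum-finding intervals.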
def cut_data_into_extrema_finding_intervals(self):
data = self.data
raw = self.raw
crossings = self.threshold_crossings
bins = crossings.index
if bins[-1] < raw.index[-1]:
bins = bins.append(pd.DatetimeIndex([raw.index[-1]]))
if bins[0] > raw.index[0]:
bins = bins.append(pd.DatetimeIndex([raw.index[0]]))
bins = bins.sort_values()
cut = pd.cut(data.index, bins=bins)
cut = pd.Series(cut, index=data.index)
self._data_in_extrema_finding_intervals = cut
return cut
@staticmethod
def format_extrema(extrema):
minima = extrema.loc[extrema == "Min"]
maxima = extrema.loc[extrema == "Max"]
min0 = minima.index[0]
max0 = maxima.index[0]
if max0 < min0:
minima = pd.Series(minima.index, np.arange(minima.index.size))
maxima = pd.Series(maxima.index, np.arange(maxima.index.size) - 1)
else:
minima = pd.Series(minima.index, np.arange(minima.index.size))
maxima = pd.Series(maxima.index, np.arange(maxima.index.size))
formatted = pd.concat({"Min": minima, "Max": maxima}, axis=1, names=["kind"])
formatted.index.name = "cycle"
return formatted
def find_extrema(self):
# raw = self.raw
data = self.data
threshold = self.threshold
cut = self.cut_data_into_extrema_finding_intervals()
maxima, minima = self._find_extrema(threshold, cut, data) # data -> raw
maxima, minima = self._validate_extrema(maxima, minima)
extrema = | pd.concat([maxima, minima], axis=0) | pandas.concat |
from sklearn.linear_model import LinearRegression
import xgboost
import shap
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn import preprocessing, impute
import numpy as np
from sklearn.decomposition import PCA
class ProcessingBlock:
def __init__(self):
"""[Initaties the pre-processing block. Uses `sklearn.StandardScaler`
for standardization of inputs and `sklearn.SimpleImputer`
for imputing missing values]
"""
print("Processing Block Constructed")
self.X_scaler = preprocessing.StandardScaler()
self.y_scaler = preprocessing.StandardScaler()
self.imputer = impute.SimpleImputer(
missing_values=np.nan, strategy="most_frequent"
)
def fit(self, X, y):
"""[Stores the given X,y data in the object fields X and y]
Args:
X ([np.array or pd.DataFrame]): [Input data]
y ([np.array or pd.DataFrame]): [Input labels]
Returns:
            [self]: [returns the object itself]
"""
self.X = X
self.y = y
return self
def split_data(self, X, y=None, test_split=0.2, scale=False):
"""[Splits the data into training and test set]
Args:
X ([np.array or pd.DataFrame]): [Input data]
y ([np.array or pd.DataFrame], optional): [Input labels.]. Defaults to None.
test_split (float, optional): [Test data split size]. Defaults to 0.2.
scale (bool, optional): [Keyword to enable standardization of data]. Defaults to False.
Returns:
            [np.array or pd.DataFrame]: [If `y` is given, returns X_train, X_test, y_train, y_test.
                Otherwise returns X_train, X_test]
"""
if scale:
X = self.X_scaler.fit_transform(X)
if y is not None:
            X_df = pd.DataFrame(X)
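            # NOTE: plausible completion -- the original snippet is truncated here.
            # It assumes the behaviour documented above: when labels are given,
            # return the four-way split produced by sklearn's train_test_split.
            y_df = pd.DataFrame(y)
            X_train, X_test, y_train, y_test = train_test_split(
                X_df, y_df, test_size=test_split
            )
            return X_train, X_test, y_train, y_test
        # Without labels, only the feature matrix is split.
        X_train, X_test = train_test_split(X, test_size=test_split)
        return X_train, X_test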
import tiledb, numpy as np
import json
import sys
import os
import io
from collections import OrderedDict
import warnings
from tiledb import TileDBError
if sys.version_info >= (3,3):
unicode_type = str
else:
unicode_type = unicode
unicode_dtype = np.dtype(unicode_type)
# TODO
# - handle missing values
# - handle extended datatypes
# - implement distributed CSV import
# - implement support for read CSV via TileDB VFS from any supported FS
TILEDB_KWARG_DEFAULTS = {
'ctx': None,
'sparse': True,
'index_dims': None,
'allows_duplicates': True,
'mode': 'ingest',
'attrs_filters': None,
'coords_filters': None,
'full_domain': False,
'tile': None,
'row_start_idx': None,
'fillna': None,
'column_types': None,
'capacity': None,
'date_spec': None,
'cell_order': 'row-major',
'tile_order': 'row-major',
'debug': None,
}
def parse_tiledb_kwargs(kwargs):
args = dict(TILEDB_KWARG_DEFAULTS)
for key in TILEDB_KWARG_DEFAULTS.keys():
if key in kwargs:
args[key] = kwargs.pop(key)
return args
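# Illustrative sketch (not part of the original module): parse_tiledb_kwargs pops the
# TileDB-specific keys out of a kwargs dict and leaves everything else (for example
# pandas.read_csv arguments) in place for the caller.
def _example_parse_tiledb_kwargs():
    kwargs = {'sparse': False, 'tile': 100, 'sep': ';', 'nrows': 10}
    tiledb_args = parse_tiledb_kwargs(kwargs)
    assert tiledb_args['sparse'] is False and tiledb_args['tile'] == 100
    # keys that are not TileDB arguments stay behind for the CSV reader
    assert kwargs == {'sep': ';', 'nrows': 10}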
class ColumnInfo:
def __init__(self, dtype, repr=None):
self.dtype = dtype
self.repr = repr
def dtype_from_column(col):
import pandas as pd
col_dtype = col.dtype
# TODO add more basic types here
if col_dtype in (np.int32, np.int64, np.uint32, np.uint64, np.float, np.double,
np.uint8):
return ColumnInfo(col_dtype)
# TODO this seems kind of brittle
if col_dtype.base == np.dtype('M8[ns]'):
if col_dtype == np.dtype('datetime64[ns]'):
return ColumnInfo(col_dtype)
elif hasattr(col_dtype, 'tz'):
raise ValueError("datetime with tz not yet supported")
else:
raise ValueError("unsupported datetime subtype ({})".format(type(col_dtype)))
# Pandas 1.0 has StringDtype extension type
if col_dtype.name == 'string':
return ColumnInfo(unicode_dtype)
if col_dtype == 'bool':
return ColumnInfo(np.uint8, repr=np.dtype('bool'))
if col_dtype == np.dtype("O"):
# Note: this does a full scan of the column... not sure what else to do here
# because Pandas allows mixed string column types (and actually has
# problems w/ allowing non-string types in object columns)
inferred_dtype = pd.api.types.infer_dtype(col)
if inferred_dtype == 'bytes':
return ColumnInfo(np.bytes_)
elif inferred_dtype == 'string':
# TODO we need to make sure this is actually convertible
return ColumnInfo(unicode_dtype)
elif inferred_dtype == 'mixed':
raise ValueError(
"Column '{}' has mixed value dtype and cannot yet be stored as a TileDB attribute".format(col.name)
)
raise ValueError(
"Unhandled column type: '{}'".format(
col_dtype
)
)
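# Illustrative sketch (not part of the original module): how dtype_from_column maps
# pandas column dtypes onto ColumnInfo. String object columns resolve to the unicode
# dtype, and bool columns are stored as uint8 with a bool repr for round-tripping.
def _example_dtype_from_column():
    import pandas as pd
    df = pd.DataFrame({'x': [1, 2, 3],
                       'y': ['a', 'b', 'c'],
                       'z': [True, False, True]})
    assert dtype_from_column(df['x']).dtype == np.dtype('int64')
    assert dtype_from_column(df['y']).dtype == unicode_dtype
    info_z = dtype_from_column(df['z'])
    assert info_z.dtype == np.uint8 and info_z.repr == np.dtype('bool')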
# TODO make this a staticmethod on Attr?
def attrs_from_df(df,
index_dims=None, filters=None,
column_types=None, ctx=None):
attr_reprs = dict()
if ctx is None:
ctx = tiledb.default_ctx()
if column_types is None:
column_types = dict()
attrs = list()
for name, col in df.items():
# ignore any column used as a dim/index
if index_dims and name in index_dims:
continue
if name in column_types:
spec_type = column_types[name]
# Handle ExtensionDtype
if hasattr(spec_type, 'type'):
spec_type = spec_type.type
attr_info = ColumnInfo(spec_type)
else:
attr_info = dtype_from_column(col)
attrs.append(tiledb.Attr(name=name, dtype=attr_info.dtype, filters=filters))
if attr_info.repr is not None:
attr_reprs[name] = attr_info.repr
return attrs, attr_reprs
def dim_info_for_column(ctx, df, col, tile=None, full_domain=False, index_dtype=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if len(col_values) < 1:
raise ValueError("Empty column '{}' cannot be used for dimension!".format(col_name))
if index_dtype is not None:
dim_info = ColumnInfo(index_dtype)
elif col_values.dtype is np.dtype('O'):
col_val0_type = type(col_values[0])
if col_val0_type in (bytes, unicode_type):
# TODO... core only supports TILEDB_ASCII right now
dim_info = ColumnInfo(np.bytes_)
else:
raise TypeError("Unknown column type not yet supported ('{}')".format(col_val0_type))
else:
dim_info = dtype_from_column(col_values)
return dim_info
def dim_for_column(ctx, name, dim_info, col, tile=None, full_domain=False, ndim=None):
if isinstance(col, np.ndarray):
col_values = col
else:
col_values = col.values
if tile is None:
if ndim is None:
raise TileDBError("Unexpected Nonetype ndim")
if ndim == 1:
tile = 10000
elif ndim == 2:
tile = 1000
elif ndim == 3:
tile = 100
else:
tile = 10
dtype = dim_info.dtype
if full_domain:
if not dim_info.dtype in (np.bytes_, np.unicode):
# Use the full type domain, deferring to the constructor
(dtype_min, dtype_max) = tiledb.libtiledb.dtype_range(dim_info.dtype)
dim_max = dtype_max
if dtype.kind == 'M':
date_unit = np.datetime_data(dtype)[0]
dim_min = np.datetime64(dtype_min + 1, date_unit)
tile_max = np.iinfo(np.uint64).max - tile
if np.abs(np.uint64(dtype_max) - np.uint64(dtype_min)) > tile_max:
dim_max = np.datetime64(dtype_max - tile, date_unit)
elif dtype is np.int64:
dim_min = dtype_min + 1
else:
dim_min = dtype_min
if dtype.kind != 'M' and np.issubdtype(dtype, np.integer):
tile_max = np.iinfo(np.uint64).max - tile
if np.abs(np.uint64(dtype_max) - np.uint64(dtype_min)) > tile_max:
dim_max = dtype_max - tile
else:
dim_min, dim_max = (None, None)
else:
dim_min = np.min(col_values)
dim_max = np.max(col_values)
if not dim_info.dtype in (np.bytes_, np.unicode):
if np.issubdtype(dtype, np.integer):
dim_range = np.uint64(np.abs(np.uint64(dim_max) - np.uint64(dim_min)))
if dim_range < tile:
tile = dim_range
elif np.issubdtype(dtype, np.float64):
dim_range = dim_max - dim_min
if dim_range < tile:
tile = np.ceil(dim_range)
dim = tiledb.Dim(
name = name,
domain = (dim_min, dim_max),
dtype = dim_info.dtype,
tile = tile
)
return dim
def get_index_metadata(dataframe):
md = dict()
for index in dataframe.index.names:
# Note: this may be expensive.
md[index] = dtype_from_column(dataframe.index.get_level_values(index)).dtype
return md
def create_dims(ctx, dataframe, index_dims,
tile=None, full_domain=False, sparse=None):
import pandas as pd
index = dataframe.index
index_dict = OrderedDict()
index_dtype = None
per_dim_tile = False
if tile is not None:
if isinstance(tile, dict):
per_dim_tile = True
# input check, can't do until after per_dim_tile
if (per_dim_tile and not all(map(lambda x: isinstance(x,(int,float)), tile.values()))) or \
(per_dim_tile is False and not isinstance(tile, (int,float))):
raise ValueError("Invalid tile kwarg: expected int or tuple of ints "
"got '{}'".format(tile))
if isinstance(index, pd.MultiIndex):
for name in index.names:
index_dict[name] = dataframe.index.get_level_values(name)
elif isinstance(index, (pd.Index, pd.RangeIndex, pd.Int64Index)):
if hasattr(index, 'name') and index.name is not None:
name = index.name
else:
index_dtype = np.dtype('uint64')
name = 'rows'
index_dict[name] = index.values
else:
raise ValueError("Unhandled index type {}".format(type(index)))
# create list of dim types
# we need to know all the types in order to validate before creating Dims
dim_types = list()
for idx,(name, values) in enumerate(index_dict.items()):
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
dim_types.append(dim_info_for_column(ctx, dataframe, values,
tile=dim_tile, full_domain=full_domain,
index_dtype=index_dtype))
if any([d.dtype in (np.bytes_, np.unicode_) for d in dim_types]):
if sparse is False:
raise TileDBError("Cannot create dense array with string-typed dimensions")
elif sparse is None:
sparse = True
d0 = dim_types[0]
if not all(d0.dtype == d.dtype for d in dim_types[1:]):
if sparse is False:
raise TileDBError("Cannot create dense array with heterogeneous dimension data types")
elif sparse is None:
sparse = True
ndim = len(dim_types)
dims = list()
for idx, (name, values) in enumerate(index_dict.items()):
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
dims.append(dim_for_column(ctx, name, dim_types[idx], values,
tile=dim_tile, full_domain=full_domain, ndim=ndim))
if index_dims:
for name in index_dims:
if per_dim_tile and name in tile:
dim_tile = tile[name]
elif per_dim_tile:
# in this case we fall back to the default
dim_tile = None
else:
# in this case we use a scalar (type-checked earlier)
dim_tile = tile
            col = dataframe[name]
            dim_info = dim_info_for_column(ctx, dataframe, col.values,
                                           tile=dim_tile, full_domain=full_domain)
            dims.append(
                dim_for_column(ctx, name, dim_info, col.values,
                               tile=dim_tile, full_domain=full_domain, ndim=ndim)
            )
return dims, sparse
def write_array_metadata(array, attr_metadata = None, index_metadata = None):
"""
:param array: open, writable TileDB array
:param metadata: dict
:return:
"""
if attr_metadata:
attr_md_dict = {n: str(t) for n,t in attr_metadata.items()}
array.meta['__pandas_attribute_repr'] = json.dumps(attr_md_dict)
if index_metadata:
index_md_dict = {n: str(t) for n,t in index_metadata.items()}
array.meta['__pandas_index_dims'] = json.dumps(index_md_dict)
def from_dataframe(uri, dataframe, **kwargs):
# deprecated in 0.6.3
warnings.warn("tiledb.from_dataframe is deprecated; please use .from_pandas",
DeprecationWarning)
from_pandas(uri, dataframe, **kwargs)
def from_pandas(uri, dataframe, **kwargs):
"""Create TileDB array at given URI from pandas dataframe
:param uri: URI for new TileDB array
:param dataframe: pandas DataFrame
:param mode: Creation mode, one of 'ingest' (default), 'schema_only', 'append'
:Keyword Arguments: optional keyword arguments for TileDB, see ``tiledb.from_csv``.
:raises: :py:exc:`tiledb.TileDBError`
:return: None
"""
import pandas as pd
args = parse_tiledb_kwargs(kwargs)
ctx = args.get('ctx', None)
tile_order = args['tile_order']
cell_order = args['cell_order']
allows_duplicates = args.get('allows_duplicates', True)
sparse = args['sparse']
index_dims = args.get('index_dims', None)
mode = args.get('mode', 'ingest')
attrs_filters = args.get('attrs_filters', None)
coords_filters = args.get('coords_filters', None)
full_domain = args.get('full_domain', False)
capacity = args.get('capacity', False)
tile = args.get('tile', None)
nrows = args.get('nrows', None)
row_start_idx = args.get('row_start_idx', None)
fillna = args.pop('fillna', None)
date_spec = args.pop('date_spec', None)
column_types = args.pop('column_types', None)
write = True
create_array = True
if mode is not None:
if mode == 'schema_only':
write = False
elif mode == 'append':
create_array = False
elif mode != 'ingest':
raise TileDBError("Invalid mode specified ('{}')".format(mode))
if capacity is None:
capacity = 0 # this will use the libtiledb internal default
if ctx is None:
ctx = tiledb.default_ctx()
if create_array:
if attrs_filters is None:
attrs_filters = tiledb.FilterList(
[tiledb.ZstdFilter(1, ctx=ctx)])
if coords_filters is None:
coords_filters = tiledb.FilterList(
[tiledb.ZstdFilter(1, ctx=ctx)])
if nrows:
if full_domain is None:
full_domain = False
# create the domain and attributes
# if sparse==None then this function may return a default based on types
dims, sparse = create_dims(ctx, dataframe, index_dims, sparse=sparse,
tile=tile, full_domain=full_domain)
domain = tiledb.Domain(
*dims,
ctx = ctx
)
attrs, attr_metadata = attrs_from_df(dataframe,
index_dims=index_dims,
filters=attrs_filters,
column_types=column_types)
# now create the ArraySchema
schema = tiledb.ArraySchema(
domain=domain,
attrs=attrs,
cell_order=cell_order,
tile_order=tile_order,
coords_filters=coords_filters,
allows_duplicates=allows_duplicates,
capacity=capacity,
sparse=sparse
)
tiledb.Array.create(uri, schema, ctx=ctx)
# apply fill replacements for NA values if specified
if fillna is not None:
dataframe.fillna(fillna, inplace=True)
# apply custom datetime parsing to given {'column_name': format_spec} pairs
    # format_spec should be provided using Python format codes:
# https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
if date_spec is not None:
if type(date_spec) is not dict:
raise TypeError("Expected 'date_spec' to be a dict, got {}".format(type(date_spec)))
for name, spec in date_spec.items():
dataframe[name] = pd.to_datetime(dataframe[name], format=spec)
if write:
write_dict = {k: v.values for k,v in dataframe.to_dict(orient='series').items()}
index_metadata = get_index_metadata(dataframe)
try:
A = tiledb.open(uri, 'w', ctx=ctx)
if A.schema.sparse:
coords = []
for k in range(A.schema.ndim):
coords.append(dataframe.index.get_level_values(k))
# TODO ensure correct col/dim ordering
A[tuple(coords)] = write_dict
else:
if row_start_idx is None:
row_start_idx = 0
row_end_idx = row_start_idx + len(dataframe)
A[row_start_idx:row_end_idx] = write_dict
if create_array:
write_array_metadata(A, attr_metadata, index_metadata)
finally:
A.close()
def _tiledb_result_as_dataframe(readable_array, result_dict):
import pandas as pd
# TODO missing key in the rep map should only be a warning, return best-effort?
# TODO this should be generalized for round-tripping overloadable types
# for any array (e.g. np.uint8 <> bool)
repr_meta = None
index_dims = None
if '__pandas_attribute_repr' in readable_array.meta:
# backwards compatibility
repr_meta = json.loads(readable_array.meta['__pandas_attribute_repr'])
if '__pandas_index_dims' in readable_array.meta:
index_dims = json.loads(readable_array.meta['__pandas_index_dims'])
indexes = list()
for col_name, col_val in result_dict.items():
if repr_meta and col_name in repr_meta:
new_col = pd.Series(col_val, dtype=repr_meta[col_name])
result_dict[col_name] = new_col
elif index_dims and col_name in index_dims:
new_col = pd.Series(col_val, dtype=index_dims[col_name])
result_dict[col_name] = new_col
indexes.append(col_name)
df = pd.DataFrame.from_dict(result_dict)
if len(indexes) > 0:
df.set_index(indexes, inplace=True)
return df
def open_dataframe(uri, ctx=None):
"""Open TileDB array at given URI as a Pandas dataframe
If the array was saved using tiledb.from_dataframe, then columns
will be interpreted as non-primitive pandas or numpy types when
available.
:param uri:
:return: dataframe constructed from given TileDB array URI
**Example:**
>>> import tiledb
>>> df = tiledb.open_dataframe("iris.tldb")
    >>> tiledb.object_type("iris.tldb")
'array'
"""
if ctx is None:
ctx = tiledb.default_ctx()
# TODO support `distributed=True` option?
with tiledb.open(uri, ctx=ctx) as A:
data = A[:]
new_df = _tiledb_result_as_dataframe(A, data)
return new_df
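# Illustrative sketch (not part of the original module): a minimal round trip through
# from_pandas and open_dataframe. The URI and column names below are made up for the
# example.
def _example_pandas_roundtrip():
    import pandas as pd
    df = pd.DataFrame({'value': [1.0, 2.5, 3.0]},
                      index=pd.Index([10, 20, 30], name='obs_id'))
    uri = '/tmp/example_from_pandas'  # hypothetical array location
    from_pandas(uri, df, sparse=True)
    round_tripped = open_dataframe(uri)
    assert list(round_tripped.columns) == ['value']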
def from_csv(uri, csv_file, **kwargs):
"""
Create TileDB array at given URI from a CSV file or list of files
:param uri: URI for new TileDB array
:param csv_file: input CSV file or list of CSV files.
Note: multi-file ingestion requires a `chunksize` argument. Files will
be read in batches of at least `chunksize` rows before writing to the
TileDB array.
:Keyword Arguments:
- Any ``pandas.read_csv`` supported keyword argument.
- TileDB-specific arguments:
* ``allows_duplicates``: Generated schema should allow duplicates
* ``cell_order``: Schema cell order
* ``tile_order``: Schema tile order
* ``mode``: (default ``ingest``), Ingestion mode: ``ingest``, ``schema_only``,
``append``
* ``full_domain``: Dimensions should be created with full range of the dtype
* ``attrs_filters``: FilterList to apply to all Attributes
* ``coords_filters``: FilterList to apply to all coordinates (Dimensions)
* ``sparse``: (default True) Create sparse schema
* ``tile``: Dimension tiling: accepts either Int or a list of Tuple[Int] with per-dimension
'tile' arguments to apply to the generated ArraySchema.
* ``capacity``: Schema capacity
* ``date_spec``: Dictionary of {``column_name``: format_spec} to apply to date/time
columns which are not correctly inferred by pandas 'parse_dates'.
Format must be specified using the Python format codes:
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior
:return: None
**Example:**
>>> import tiledb
>>> tiledb.from_csv("iris.tldb", "iris.csv")
>>> tiledb.object_type("iris.tldb")
'array'
"""
try:
import pandas
except ImportError as exc:
print("tiledb.from_csv requires pandas")
raise
tiledb_args = parse_tiledb_kwargs(kwargs)
multi_file = False
debug = tiledb_args.get('debug', False)
if isinstance(csv_file, str) and not os.path.isfile(csv_file):
# for non-local files, use TileDB VFS i/o
ctx = tiledb_args.get('ctx', tiledb.default_ctx())
vfs = tiledb.VFS(ctx=ctx)
csv_file = tiledb.FileIO(vfs, csv_file, mode='rb')
elif isinstance(csv_file, (list, tuple)):
# TODO may be useful to support a callback here
multi_file = True
mode = kwargs.pop('mode', None)
if mode is not None:
tiledb_args['mode'] = mode
# For schema_only mode we need to pass a max read count into
# pandas.read_csv
# Note that 'nrows' is a pandas arg!
if mode == 'schema_only' and not 'nrows' in kwargs:
kwargs['nrows'] = 500
elif mode not in ['ingest', 'append']:
raise TileDBError("Invalid mode specified ('{}')".format(mode))
chunksize = kwargs.get('chunksize', None)
if multi_file and not chunksize:
raise TileDBError("Multiple input CSV files requires a 'chunksize' argument")
if chunksize is not None or multi_file:
if not 'nrows' in kwargs:
full_domain = True
array_created = False
if mode == 'schema_only':
raise TileDBError("schema_only ingestion not supported for chunked read")
elif mode == 'append':
array_created = True
csv_kwargs = kwargs.copy()
kwargs.update(tiledb_args)
if multi_file:
input_csv_list = csv_file
csv_kwargs.pop("chunksize")
else:
input_csv = csv_file
keep_reading = True
rows_written = 0
csv_idx = 0
df_iter = None
while keep_reading:
# if we have multiple files, read them until we hit row threshold
if multi_file:
rows_read = 0
input_dfs = list()
while rows_read < chunksize and keep_reading:
input_csv = input_csv_list[csv_idx]
                df = pandas.read_csv(input_csv, **csv_kwargs)
#
# Convert API responses to Pandas DataFrames
#
import pandas as pd
def accounts(data):
"""accounts as dataframe"""
return pd.concat(
pd.json_normalize(v["securitiesAccount"]) for v in data.values()
).set_index("accountId")
def transactions(data):
"""transaction information as Dataframe"""
    return pd.json_normalize(data)
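# Illustrative sketch (not part of the original helpers): json_normalize flattens the
# nested dicts returned by the API into one row per transaction. The field names below
# are made up for the example.
def _example_transactions():
    data = [
        {"transactionId": 1, "type": "TRADE", "fees": {"commission": 0.0}},
        {"transactionId": 2, "type": "DIVIDEND", "fees": {"commission": 0.0}},
    ]
    df = transactions(data)
    # nested keys become dotted column names, e.g. 'fees.commission'
    assert "fees.commission" in df.columns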
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy
import pandas
from numba import prange
from numba.extending import register_jitable
from numba.types import (float64, Boolean, Integer, NoneType, Number,
Omitted, StringLiteral, UnicodeType)
from sdc.datatypes.common_functions import TypeChecker
from sdc.datatypes.hpat_pandas_series_rolling_types import SeriesRollingType
from sdc.utils import sdc_overload_method
hpat_pandas_series_rolling_docstring_tmpl = """
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.core.window.Rolling.{method_name}
{limitations_block}
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_{method_name}.py
:language: python
:lines: 27-
:caption: {example_caption}
:name: ex_series_rolling_{method_name}
.. command-output:: python ./series/rolling/series_rolling_{method_name}.py
:cwd: ../../../examples
.. seealso::
:ref:`Series.rolling <pandas.Series.rolling>`
Calling object with a Series.
:ref:`DataFrame.rolling <pandas.DataFrame.rolling>`
Calling object with a DataFrame.
:ref:`Series.{method_name} <pandas.Series.{method_name}>`
Similar method for Series.
:ref:`DataFrame.{method_name} <pandas.DataFrame.{method_name}>`
Similar method for DataFrame.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.rolling.{method_name}()` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling_{method_name}
Parameters
----------
self: :class:`pandas.Series.rolling`
input arg{extra_params}
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
@register_jitable
def arr_apply(arr, func):
"""Apply function for values"""
return func(arr)
@register_jitable
def arr_corr(x, y):
"""Calculate correlation of values"""
if len(x) == 0:
return numpy.nan
return numpy.corrcoef(x, y)[0, 1]
@register_jitable
def arr_nonnan_count(arr):
"""Count non-NaN values"""
return len(arr) - numpy.isnan(arr).sum()
@register_jitable
def arr_cov(x, y, ddof):
"""Calculate covariance of values 1D arrays x and y of the same size"""
if len(x) == 0:
return numpy.nan
return numpy.cov(x, y, ddof=ddof)[0, 1]
@register_jitable
def _moment(arr, moment):
mn = numpy.mean(arr)
s = numpy.power((arr - mn), moment)
return numpy.mean(s)
@register_jitable
def arr_kurt(arr):
"""Calculate unbiased kurtosis of values"""
n = len(arr)
if n < 4:
return numpy.nan
m2 = _moment(arr, 2)
m4 = _moment(arr, 4)
val = 0 if m2 == 0 else m4 / m2 ** 2.0
if (n > 2) & (m2 > 0):
val = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
return val
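# Illustrative check (not part of the original module): the expression in arr_kurt is
# algebraically the unbiased excess kurtosis, i.e. scipy.stats.kurtosis with
# fisher=True, bias=False, which is also what pandas' rolling kurt reports.
def _example_arr_kurt_check():
    import scipy.stats
    arr = numpy.array([4.0, 2.0, 7.0, 1.0, 9.0, 3.0])
    expected = scipy.stats.kurtosis(arr, fisher=True, bias=False)
    assert numpy.isclose(arr_kurt(arr), expected)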
@register_jitable
def arr_max(arr):
"""Calculate maximum of values"""
if len(arr) == 0:
return numpy.nan
return arr.max()
@register_jitable
def arr_mean(arr):
"""Calculate mean of values"""
if len(arr) == 0:
return numpy.nan
return arr.mean()
@register_jitable
def arr_median(arr):
"""Calculate median of values"""
if len(arr) == 0:
return numpy.nan
return numpy.median(arr)
@register_jitable
def arr_min(arr):
"""Calculate minimum of values"""
if len(arr) == 0:
return numpy.nan
return arr.min()
@register_jitable
def arr_quantile(arr, q):
"""Calculate quantile of values"""
if len(arr) == 0:
return numpy.nan
return numpy.quantile(arr, q)
@register_jitable
def _moment(arr, moment):
mn = numpy.mean(arr)
s = numpy.power((arr - mn), moment)
return numpy.mean(s)
@register_jitable
def arr_skew(arr):
"""Calculate unbiased skewness of values"""
n = len(arr)
if n < 3:
return numpy.nan
m2 = _moment(arr, 2)
m3 = _moment(arr, 3)
val = 0 if m2 == 0 else m3 / m2 ** 1.5
if (n > 2) & (m2 > 0):
val = numpy.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2 ** 1.5
return val
@register_jitable
def arr_std(arr, ddof):
"""Calculate standard deviation of values"""
return arr_var(arr, ddof) ** 0.5
@register_jitable
def arr_sum(arr):
"""Calculate sum of values"""
return arr.sum()
@register_jitable
def arr_var(arr, ddof):
"""Calculate unbiased variance of values"""
length = len(arr)
if length in [0, ddof]:
return numpy.nan
return numpy.var(arr) * length / (length - ddof)
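# Illustrative check (not part of the original module): arr_var rescales the biased
# numpy variance by length / (length - ddof), which matches numpy.var(..., ddof=ddof),
# and returns NaN when there are not enough observations.
def _example_arr_var_check():
    arr = numpy.array([1.0, 2.0, 4.0, 8.0])
    assert numpy.isclose(arr_var(arr, 1), numpy.var(arr, ddof=1))
    assert numpy.isnan(arr_var(arr[:1], 1))  # length == ddof -> NaN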
def gen_hpat_pandas_series_rolling_impl(rolling_func, output_type=None):
"""Generate series rolling methods implementations based on input func"""
nan_out_type = output_type is None
def impl(self):
win = self._window
minp = self._min_periods
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
out_type = input_arr.dtype if nan_out_type == True else output_type # noqa
output_arr = numpy.empty(length, dtype=out_type)
def apply_minp(arr, minp):
finite_arr = arr[numpy.isfinite(arr)]
if len(finite_arr) < minp:
return numpy.nan
else:
return rolling_func(finite_arr)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = apply_minp(arr_range, minp)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = apply_minp(arr_range, minp)
return pandas.Series(output_arr, input_series._index, name=input_series._name)
return impl
def gen_hpat_pandas_series_rolling_zerominp_impl(rolling_func, output_type=None):
"""Generate series rolling methods implementations with zero min_periods"""
nan_out_type = output_type is None
def impl(self):
win = self._window
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
out_type = input_arr.dtype if nan_out_type == True else output_type # noqa
output_arr = numpy.empty(length, dtype=out_type)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = rolling_func(arr_range)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = rolling_func(arr_range)
return pandas.Series(output_arr, input_series._index, name=input_series._name)
return impl
hpat_pandas_rolling_series_count_impl = register_jitable(
gen_hpat_pandas_series_rolling_zerominp_impl(arr_nonnan_count, float64))
hpat_pandas_rolling_series_kurt_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_kurt, float64))
hpat_pandas_rolling_series_max_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_max, float64))
hpat_pandas_rolling_series_mean_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_mean, float64))
hpat_pandas_rolling_series_median_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_median, float64))
hpat_pandas_rolling_series_min_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_min, float64))
hpat_pandas_rolling_series_skew_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_skew, float64))
hpat_pandas_rolling_series_sum_impl = register_jitable(
gen_hpat_pandas_series_rolling_impl(arr_sum, float64))
@sdc_overload_method(SeriesRollingType, 'apply')
def hpat_pandas_series_rolling_apply(self, func, raw=None):
ty_checker = TypeChecker('Method rolling.apply().')
ty_checker.check(self, SeriesRollingType)
raw_accepted = (Omitted, NoneType, Boolean)
if not isinstance(raw, raw_accepted) and raw is not None:
ty_checker.raise_exc(raw, 'bool', 'raw')
def hpat_pandas_rolling_series_apply_impl(self, func, raw=None):
win = self._window
minp = self._min_periods
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
output_arr = numpy.empty(length, dtype=float64)
def culc_apply(arr, func, minp):
finite_arr = arr.copy()
finite_arr[numpy.isinf(arr)] = numpy.nan
if len(finite_arr) < minp:
return numpy.nan
else:
return arr_apply(finite_arr, func)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = culc_apply(arr_range, func, minp)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = culc_apply(arr_range, func, minp)
return pandas.Series(output_arr, input_series._index, name=input_series._name)
return hpat_pandas_rolling_series_apply_impl
@sdc_overload_method(SeriesRollingType, 'corr')
def hpat_pandas_series_rolling_corr(self, other=None, pairwise=None):
ty_checker = TypeChecker('Method rolling.corr().')
ty_checker.check(self, SeriesRollingType)
# TODO: check `other` is Series after a circular import of SeriesType fixed
# accepted_other = (bool, Omitted, NoneType, SeriesType)
# if not isinstance(other, accepted_other) and other is not None:
# ty_checker.raise_exc(other, 'Series', 'other')
accepted_pairwise = (bool, Boolean, Omitted, NoneType)
if not isinstance(pairwise, accepted_pairwise) and pairwise is not None:
ty_checker.raise_exc(pairwise, 'bool', 'pairwise')
nan_other = isinstance(other, (Omitted, NoneType)) or other is None
def hpat_pandas_rolling_series_corr_impl(self, other=None, pairwise=None):
win = self._window
minp = self._min_periods
main_series = self._data
main_arr = main_series._data
main_arr_length = len(main_arr)
if nan_other == True: # noqa
other_arr = main_arr
else:
other_arr = other._data
other_arr_length = len(other_arr)
length = max(main_arr_length, other_arr_length)
output_arr = numpy.empty(length, dtype=float64)
def calc_corr(main, other, minp):
# align arrays `main` and `other` by size and finiteness
min_length = min(len(main), len(other))
main_valid_indices = numpy.isfinite(main[:min_length])
other_valid_indices = numpy.isfinite(other[:min_length])
valid = main_valid_indices & other_valid_indices
if len(main[valid]) < minp:
return numpy.nan
else:
return arr_corr(main[valid], other[valid])
for i in prange(min(win, length)):
main_arr_range = main_arr[:i + 1]
other_arr_range = other_arr[:i + 1]
output_arr[i] = calc_corr(main_arr_range, other_arr_range, minp)
for i in prange(win, length):
main_arr_range = main_arr[i + 1 - win:i + 1]
other_arr_range = other_arr[i + 1 - win:i + 1]
output_arr[i] = calc_corr(main_arr_range, other_arr_range, minp)
return pandas.Series(output_arr)
return hpat_pandas_rolling_series_corr_impl
@sdc_overload_method(SeriesRollingType, 'count')
def hpat_pandas_series_rolling_count(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.core.window.Rolling.count
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_count.py
:language: python
:lines: 27-
:caption: Count of any non-NaN observations inside the window.
:name: ex_series_rolling_count
.. command-output:: python ./series/rolling/series_rolling_count.py
:cwd: ../../../examples
.. seealso::
:ref:`Series.rolling <pandas.Series.rolling>`
Calling object with a Series.
:ref:`DataFrame.rolling <pandas.DataFrame.rolling>`
Calling object with a DataFrame.
:ref:`Series.count <pandas.Series.count>`
Similar method for Series.
:ref:`DataFrame.count <pandas.DataFrame.count>`
Similar method for DataFrame.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.rolling.count()` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling_count
Parameters
----------
self: :class:`pandas.Series.rolling`
input arg
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
ty_checker = TypeChecker('Method rolling.count().')
ty_checker.check(self, SeriesRollingType)
return hpat_pandas_rolling_series_count_impl
@sdc_overload_method(SeriesRollingType, 'cov')
def hpat_pandas_series_rolling_cov(self, other=None, pairwise=None, ddof=1):
ty_checker = TypeChecker('Method rolling.cov().')
ty_checker.check(self, SeriesRollingType)
# TODO: check `other` is Series after a circular import of SeriesType fixed
# accepted_other = (bool, Omitted, NoneType, SeriesType)
# if not isinstance(other, accepted_other) and other is not None:
# ty_checker.raise_exc(other, 'Series', 'other')
accepted_pairwise = (bool, Boolean, Omitted, NoneType)
if not isinstance(pairwise, accepted_pairwise) and pairwise is not None:
ty_checker.raise_exc(pairwise, 'bool', 'pairwise')
if not isinstance(ddof, (int, Integer, Omitted)):
ty_checker.raise_exc(ddof, 'int', 'ddof')
nan_other = isinstance(other, (Omitted, NoneType)) or other is None
def hpat_pandas_rolling_series_cov_impl(self, other=None, pairwise=None, ddof=1):
win = self._window
minp = self._min_periods
main_series = self._data
main_arr = main_series._data
main_arr_length = len(main_arr)
if nan_other == True: # noqa
other_arr = main_arr
else:
other_arr = other._data
other_arr_length = len(other_arr)
length = max(main_arr_length, other_arr_length)
output_arr = numpy.empty(length, dtype=float64)
def calc_cov(main, other, ddof, minp):
# align arrays `main` and `other` by size and finiteness
min_length = min(len(main), len(other))
main_valid_indices = numpy.isfinite(main[:min_length])
other_valid_indices = numpy.isfinite(other[:min_length])
valid = main_valid_indices & other_valid_indices
if len(main[valid]) < minp:
return numpy.nan
else:
return arr_cov(main[valid], other[valid], ddof)
for i in prange(min(win, length)):
main_arr_range = main_arr[:i + 1]
other_arr_range = other_arr[:i + 1]
output_arr[i] = calc_cov(main_arr_range, other_arr_range, ddof, minp)
for i in prange(win, length):
main_arr_range = main_arr[i + 1 - win:i + 1]
other_arr_range = other_arr[i + 1 - win:i + 1]
output_arr[i] = calc_cov(main_arr_range, other_arr_range, ddof, minp)
return pandas.Series(output_arr)
return hpat_pandas_rolling_series_cov_impl
@sdc_overload_method(SeriesRollingType, 'kurt')
def hpat_pandas_series_rolling_kurt(self):
ty_checker = TypeChecker('Method rolling.kurt().')
ty_checker.check(self, SeriesRollingType)
return hpat_pandas_rolling_series_kurt_impl
@sdc_overload_method(SeriesRollingType, 'max')
def hpat_pandas_series_rolling_max(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.core.window.Rolling.max
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_max.py
:language: python
:lines: 27-
:caption: Calculate the rolling maximum.
:name: ex_series_rolling_max
.. command-output:: python ./series/rolling/series_rolling_max.py
:cwd: ../../../examples
.. seealso::
:ref:`Series.rolling <pandas.Series.rolling>`
Calling object with a Series.
:ref:`DataFrame.rolling <pandas.DataFrame.rolling>`
Calling object with a DataFrame.
:ref:`Series.max <pandas.Series.max>`
Similar method for Series.
:ref:`DataFrame.max <pandas.DataFrame.max>`
Similar method for DataFrame.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.rolling.max()` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling_max
Parameters
----------
self: :class:`pandas.Series.rolling`
input arg
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
ty_checker = TypeChecker('Method rolling.max().')
ty_checker.check(self, SeriesRollingType)
return hpat_pandas_rolling_series_max_impl
@sdc_overload_method(SeriesRollingType, 'mean')
def hpat_pandas_series_rolling_mean(self):
ty_checker = TypeChecker('Method rolling.mean().')
ty_checker.check(self, SeriesRollingType)
return hpat_pandas_rolling_series_mean_impl
@sdc_overload_method(SeriesRollingType, 'median')
def hpat_pandas_series_rolling_median(self):
ty_checker = TypeChecker('Method rolling.median().')
ty_checker.check(self, SeriesRollingType)
return hpat_pandas_rolling_series_median_impl
@sdc_overload_method(SeriesRollingType, 'min')
def hpat_pandas_series_rolling_min(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.core.window.Rolling.min
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_min.py
:language: python
:lines: 27-
:caption: Calculate the rolling minimum.
:name: ex_series_rolling_min
.. command-output:: python ./series/rolling/series_rolling_min.py
:cwd: ../../../examples
.. seealso::
:ref:`Series.rolling <pandas.Series.rolling>`
Calling object with a Series.
:ref:`DataFrame.rolling <pandas.DataFrame.rolling>`
Calling object with a DataFrame.
:ref:`Series.min <pandas.Series.min>`
Similar method for Series.
:ref:`DataFrame.min <pandas.DataFrame.min>`
Similar method for DataFrame.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.rolling.min()` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling_min
Parameters
----------
self: :class:`pandas.Series.rolling`
input arg
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
ty_checker = TypeChecker('Method rolling.min().')
ty_checker.check(self, SeriesRollingType)
return hpat_pandas_rolling_series_min_impl
@sdc_overload_method(SeriesRollingType, 'quantile')
def hpat_pandas_series_rolling_quantile(self, quantile, interpolation='linear'):
ty_checker = TypeChecker('Method rolling.quantile().')
ty_checker.check(self, SeriesRollingType)
if not isinstance(quantile, Number):
ty_checker.raise_exc(quantile, 'float', 'quantile')
str_types = (Omitted, StringLiteral, UnicodeType)
if not isinstance(interpolation, str_types) and interpolation != 'linear':
ty_checker.raise_exc(interpolation, 'str', 'interpolation')
def hpat_pandas_rolling_series_quantile_impl(self, quantile, interpolation='linear'):
if quantile < 0 or quantile > 1:
raise ValueError('quantile value not in [0, 1]')
if interpolation != 'linear':
raise ValueError('interpolation value not "linear"')
win = self._window
minp = self._min_periods
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
output_arr = numpy.empty(length, dtype=float64)
def calc_quantile(arr, quantile, minp):
finite_arr = arr[numpy.isfinite(arr)]
if len(finite_arr) < minp:
return numpy.nan
else:
return arr_quantile(finite_arr, quantile)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = calc_quantile(arr_range, quantile, minp)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = calc_quantile(arr_range, quantile, minp)
return pandas.Series(output_arr, input_series._index, name=input_series._name)
return hpat_pandas_rolling_series_quantile_impl
@sdc_overload_method(SeriesRollingType, 'skew')
def hpat_pandas_series_rolling_skew(self):
ty_checker = TypeChecker('Method rolling.skew().')
ty_checker.check(self, SeriesRollingType)
return hpat_pandas_rolling_series_skew_impl
@sdc_overload_method(SeriesRollingType, 'sum')
def hpat_pandas_series_rolling_sum(self):
"""
Intel Scalable Dataframe Compiler User Guide
********************************************
Pandas API: pandas.core.window.Rolling.sum
Limitations
-----------
Series elements cannot be max/min float/integer. Otherwise SDC and Pandas results are different.
Examples
--------
.. literalinclude:: ../../../examples/series/rolling/series_rolling_sum.py
:language: python
:lines: 27-
:caption: Calculate rolling sum of given Series.
:name: ex_series_rolling_sum
.. command-output:: python ./series/rolling/series_rolling_sum.py
:cwd: ../../../examples
.. seealso::
:ref:`Series.rolling <pandas.Series.rolling>`
Calling object with a Series.
:ref:`DataFrame.rolling <pandas.DataFrame.rolling>`
Calling object with a DataFrame.
:ref:`Series.sum <pandas.Series.sum>`
Similar method for Series.
:ref:`DataFrame.sum <pandas.DataFrame.sum>`
Similar method for DataFrame.
Intel Scalable Dataframe Compiler Developer Guide
*************************************************
Pandas Series method :meth:`pandas.Series.rolling.sum()` implementation.
.. only:: developer
Test: python -m sdc.runtests -k sdc.tests.test_rolling.TestRolling.test_series_rolling_sum
Parameters
----------
self: :class:`pandas.Series.rolling`
input arg
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
ty_checker = TypeChecker('Method rolling.sum().')
ty_checker.check(self, SeriesRollingType)
return hpat_pandas_rolling_series_sum_impl
@sdc_overload_method(SeriesRollingType, 'std')
def hpat_pandas_series_rolling_std(self, ddof=1):
ty_checker = TypeChecker('Method rolling.std().')
ty_checker.check(self, SeriesRollingType)
if not isinstance(ddof, (int, Integer, Omitted)):
ty_checker.raise_exc(ddof, 'int', 'ddof')
def hpat_pandas_rolling_series_std_impl(self, ddof=1):
win = self._window
minp = self._min_periods
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
output_arr = numpy.empty(length, dtype=float64)
def culc_std(arr, ddof, minp):
finite_arr = arr[numpy.isfinite(arr)]
if len(finite_arr) < minp:
return numpy.nan
else:
return arr_std(finite_arr, ddof)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = culc_std(arr_range, ddof, minp)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = culc_std(arr_range, ddof, minp)
return pandas.Series(output_arr, input_series._index, name=input_series._name)
return hpat_pandas_rolling_series_std_impl
@sdc_overload_method(SeriesRollingType, 'var')
def hpat_pandas_series_rolling_var(self, ddof=1):
ty_checker = TypeChecker('Method rolling.var().')
ty_checker.check(self, SeriesRollingType)
if not isinstance(ddof, (int, Integer, Omitted)):
ty_checker.raise_exc(ddof, 'int', 'ddof')
def hpat_pandas_rolling_series_var_impl(self, ddof=1):
win = self._window
minp = self._min_periods
input_series = self._data
input_arr = input_series._data
length = len(input_arr)
output_arr = numpy.empty(length, dtype=float64)
def culc_var(arr, ddof, minp):
finite_arr = arr[numpy.isfinite(arr)]
if len(finite_arr) < minp:
return numpy.nan
else:
return arr_var(finite_arr, ddof)
boundary = min(win, length)
for i in prange(boundary):
arr_range = input_arr[:i + 1]
output_arr[i] = culc_var(arr_range, ddof, minp)
for i in prange(boundary, length):
arr_range = input_arr[i + 1 - win:i + 1]
output_arr[i] = culc_var(arr_range, ddof, minp)
        return pandas.Series(output_arr, input_series._index, name=input_series._name)
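    # Completing the overload to mirror rolling.std above (the original snippet is
    # truncated at this point).
    return hpat_pandas_rolling_series_var_impl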
"""
PREPROCESSING DATA
Disaster Response Pipeline Project
Udacity - Data Science Nanodegree
Sample Script Execution:
> python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db
Arguments:
1) CSV file containing messages (disaster_messages.csv)
2) CSV file containing categories (disaster_categories.csv)
3) SQLite destination database (DisasterResponse.db)
"""
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""Load and merge messages and categories datasets
Args:
messages_filepath: string, Filepath for csv file containing messages dataset.
categories_filepath: string, Filepath for csv file containing categories dataset.
Returns:
df: dataframe, Dataframe containing merged content of messages and categories datasets.
"""
# Load messages dataset
    messages = pd.read_csv(messages_filepath)
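    # NOTE: plausible completion -- the original snippet is truncated here. It assumes
    # the two CSV files share an 'id' column, which is the usual layout for this dataset.
    categories = pd.read_csv(categories_filepath)
    df = messages.merge(categories, on='id')
    return df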
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
# incorrectly inferring on datetimelike-looking values when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
# even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
# we are not specifying a dtype it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
# invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = | Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"]) | pandas.Series |
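# A small standalone illustration of the dict-plus-index behaviour exercised by
# test_constructor_dict above: values follow the requested index order and any
# missing label becomes NaN. Plain pandas usage, not part of the test suite.
import numpy as np
import pandas as pd

d = {"a": 0.0, "b": 1.0, "c": 2.0}
s = pd.Series(d, index=["b", "c", "d", "a"])
print(s)
# b    1.0
# c    2.0
# d    NaN
# a    0.0
# dtype: float64
assert np.isnan(s["d"]) and s["a"] == 0.0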
# Copyright WillianFuks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module plot.py. Module matplotlib is not required as it's mocked accordingly.
"""
import mock
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
import causalimpact.plot as plotter
@pytest.fixture
def inferences(rand_data):
df = pd.DataFrame(np.random.rand(len(rand_data), 9))
df.columns = [
'complete_preds_means',
'complete_preds_lower',
'complete_preds_upper',
'point_effects_means',
'point_effects_lower',
'point_effects_upper',
'post_cum_effects_means',
'post_cum_effects_lower',
'post_cum_effects_upper'
]
return df
def test_build_data():
pre_data = pd.DataFrame([0, 1, np.nan])
post_data = pd.DataFrame([3, 4, np.nan], index=[3, 4, 5])
inferences = pd.DataFrame([0, 1, 2, 3, 4, 5])
pre_data, post_data, inferences = plotter.build_data(pre_data, post_data, inferences)
expected_pre_data = pd.DataFrame([0, 1]).astype(np.float64)
pd.testing.assert_frame_equal(pre_data, expected_pre_data)
expected_post_data = pd.DataFrame([3, 4], index=[3, 4]).astype(np.float64)
| pd.testing.assert_frame_equal(post_data, expected_post_data) | pandas.testing.assert_frame_equal |
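# A minimal sketch of the alignment that test_build_data above expects: all-NaN
# rows are dropped and the remainder is cast to float64. This mirrors only the
# expectations encoded in the test, not causalimpact's actual implementation.
import numpy as np
import pandas as pd

pre_data = pd.DataFrame([0, 1, np.nan])
cleaned = pre_data.dropna().astype(np.float64)
expected = pd.DataFrame([0, 1]).astype(np.float64)
pd.testing.assert_frame_equal(cleaned, expected)  # passes silently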
from __future__ import division #brings in Python 3.0 mixed type calculations
import numpy as np
import os
import pandas as pd
import sys
#find parent directory and import model
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs
class BeerexInputs(ModelSharedInputs):
"""
Input class for Beerex
"""
def __init__(self):
"""Class representing the inputs for Beerex"""
super(BeerexInputs, self).__init__()
#self.incorporation_depth = pd.Series([], dtype="float")
self.application_rate = pd.Series([], dtype="float")
self.application_method = pd.Series([], dtype="object")
self.crop_type = pd.Series([], dtype="object")
# self.application_units = pd.Series([], dtype="object")
self.empirical_residue = pd.Series([], dtype="object")
self.empirical_pollen = pd.Series([], dtype="float")
self.empirical_nectar = pd.Series([], dtype="float")
self.empirical_jelly = pd.Series([], dtype="float")
self.adult_contact_ld50 = pd.Series([], dtype="float")
self.adult_oral_ld50 = pd.Series([], dtype="float")
self.adult_oral_noael = pd.Series([], dtype="float")
self.larval_ld50 = pd.Series([], dtype="float")
self.larval_noael = pd.Series([], dtype="float")
self.log_kow = pd.Series([], dtype="float")
self.koc = pd.Series([], dtype="float")
self.mass_tree_vegetation = pd.Series([], dtype="float")
self.lw1_jelly = pd.Series([], dtype="float")
self.lw2_jelly = pd.Series([], dtype="float")
self.lw3_jelly = pd.Series([], dtype="float")
self.lw4_nectar = pd.Series([], dtype="float")
self.lw4_pollen = pd.Series([], dtype="float")
self.lw5_nectar = pd.Series([], dtype="float")
self.lw5_pollen = pd.Series([], dtype="float")
self.ld6_nectar = pd.Series([], dtype="float")
self.ld6_pollen = pd.Series([], dtype="float")
self.lq1_jelly = pd.Series([], dtype="float")
self.lq2_jelly = pd.Series([], dtype="float")
self.lq3_jelly = pd.Series([], dtype="float")
self.lq4_jelly = pd.Series([], dtype="float")
self.aw_cell_nectar = pd.Series([], dtype="float")
self.aw_cell_pollen = pd.Series([], dtype="float")
self.aw_brood_nectar = pd.Series([], dtype="float")
self.aw_brood_pollen = pd.Series([], dtype="float")
self.aw_comb_nectar = pd.Series([], dtype="float")
self.aw_comb_pollen = pd.Series([], dtype="float")
self.aw_fpollen_nectar = pd.Series([], dtype="float")
self.aw_fpollen_pollen = pd.Series([], dtype="float")
self.aw_fnectar_nectar = pd.Series([], dtype="float")
self.aw_fnectar_pollen = pd.Series([], dtype="float")
self.aw_winter_nectar = pd.Series([], dtype="float")
self.aw_winter_pollen = pd.Series([], dtype="float")
self.ad_nectar = pd.Series([], dtype="float")
self.ad_pollen = pd.Series([], dtype="float")
self.aq_jelly = pd.Series([], dtype="float")
class BeerexOutputs(object):
"""
Output class for Beerex
"""
def __init__(self):
"""Class representing the outputs for Beerex"""
super(BeerexOutputs, self).__init__()
self.out_eec_spray = pd.Series(name="out_eec_spray", dtype="float")
self.out_eec_soil = pd.Series(name="out_eec_soil", dtype="float")
self.out_eec_seed = pd.Series(name="out_eec_seed", dtype="float")
self.out_eec_tree = pd.Series(name="out_eec_tree", dtype="float")
self.out_eec = pd.Series(name="out_eec", dtype="float")
self.out_lw1_total_dose = pd.Series(name="out_lw1_total_dose", dtype="float")
self.out_lw2_total_dose = pd.Series(name="out_lw2_total_dose", dtype="float")
self.out_lw3_total_dose = pd.Series(name="out_lw3_total_dose", dtype="float")
self.out_lw4_total_dose = pd.Series(name="out_lw4_total_dose", dtype="float")
self.out_lw5_total_dose = pd.Series(name="out_lw5_total_dose", dtype="float")
self.out_ld6_total_dose = pd.Series(name="out_ld6_total_dose", dtype="float")
self.out_lq1_total_dose = pd.Series(name="out_lq1_total_dose", dtype="float")
self.out_lq2_total_dose = pd.Series(name="out_lq2_total_dose", dtype="float")
self.out_lq3_total_dose = pd.Series(name="out_lq3_total_dose", dtype="float")
self.out_lq4_total_dose = pd.Series(name="out_lq4_total_dose", dtype="float")
self.out_aw_cell_total_dose = pd.Series(name="out_aw_cell_total_dose", dtype="float")
self.out_aw_brood_total_dose = pd.Series(name="out_aw_brood_total_dose", dtype="float")
self.out_aw_comb_total_dose = pd.Series(name="out_aw_comb_total_dose", dtype="float")
self.out_aw_pollen_total_dose = pd.Series(name="out_aw_pollen_total_dose", dtype="float")
self.out_aw_nectar_total_dose = pd.Series(name="out_aw_nectar_total_dose", dtype="float")
self.out_aw_winter_total_dose = pd.Series(name="out_aw_winter_total_dose", dtype="float")
self.out_ad_total_dose = | pd.Series(name="out_ad_total_dose", dtype="float") | pandas.Series |
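# The Beerex input/output classes above only pre-declare empty, typed pandas
# Series. A minimal sketch of how such fields are typically filled from a table
# of model runs; the column names below are hypothetical.
import pandas as pd

runs = pd.DataFrame({"application_rate": [1.2, 0.8], "crop_type": ["corn", "cotton"]})
application_rate = pd.Series(runs["application_rate"], dtype="float")
crop_type = pd.Series(runs["crop_type"], dtype="object")
print(application_rate.dtype, len(crop_type))  # float64 2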
import argparse
import os
import numpy as np
import torch
import torch.utils.data
from PIL import Image
import pandas as pd
import cv2
import json
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import functional as F
from torchvision.models.detection import fasterrcnn_resnet50_fpn
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
# from utils import utils
class FramesDataset(Dataset):
"""Creates a dataset that can be fed into DatasetLoader
Args:
frames (list): A list of cv2-compatible numpy arrays or
a list of PIL Images
"""
def __init__(self, frames):
# Convert to list of tensors
x = [F.to_tensor(img) for img in frames]
# Define which device to use, either gpu or cpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Send the frames to device
x_device = [img.to(device) for img in x]
self.x = x_device #x
def __getitem__(self, idx):
return self.x[idx]
def __len__(self):
return len(self.x)
class ObjectDetector():
"""ObjectDetector class with staticmethods that can be called from outside by importing as below:
from helmet_detector.detector import ObjectDetector
The static methods can be accessed using ObjectDetector.<name of static method>()
"""
@staticmethod
def load_custom_model(model_path=None, num_classes=None):
"""Load a model from local file system with custom parameters
Load FasterRCNN model using custom parameters
Args:
model_path (str): Path to model parameters
num_classes (int): Number of classes in the custom model
Returns:
model: Loaded model in evaluation mode for inference
"""
# load an object detection model pre-trained on COCO
model = fasterrcnn_resnet50_fpn(pretrained=True)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# load previously fine-tuned parameters
# Define which device to use, either gpu or cpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
if torch.cuda.is_available():
model.load_state_dict(torch.load(model_path))
model.to(device)
else:
model.load_state_dict(torch.load(model_path, map_location=device))
# Put the model in evaluation mode
model.eval()
return model
@staticmethod
def run_detection(img, loaded_model):
""" Run inference on single image
Args:
img: image in 'numpy.ndarray' format
loaded_model: trained model
Returns:
Default predictions from trained model
"""
# need to make sure we have 3d tensors of shape [C, H, W]
with torch.no_grad():
prediction = loaded_model(img)
return prediction
@staticmethod
def to_dataframe_highconf(predictions, conf_thres, frame_id):
""" Converts the default predictions into a Pandas DataFrame, only predictions with score greater than conf_thres
Args:
predictions (list): Default FasterRCNN implementation output.
This is a list of dicts with keys ['boxes','labels','scores']
frame_id : frame id
conf_thres: score greater than this will be kept as detections
Returns:
A Pandas DataFrame with columns
['frame_id','class_id','score','x1','y1','x2','y2']
"""
df_list = []
for i, p in enumerate(predictions):
boxes = p['boxes'].detach().cpu().tolist()
labels = p['labels'].detach().cpu().tolist()
scores = p['scores'].detach().cpu().tolist()
df = pd.DataFrame(boxes, columns=['x1','y1','x2','y2'])
df['class_id'] = labels
df['score'] = scores
df['frame_id'] = frame_id
df_list.append(df)
df_detect = pd.concat(df_list, axis=0)
df_detect = df_detect[['frame_id','class_id','score','x1','y1','x2','y2']]
# Keep predictions with high confidence, with score greater than conf_thres
df_detect = df_detect.loc[df_detect['score'] >= conf_thres]
return df_detect
@staticmethod
def to_dataframe(predictions):
""" Converts the default predictions into a Pandas DataFrame
Args:
predictions (list): Default FasterRCNN implementation output.
This is a list of dicts with keys ['boxes','labels','scores']
Returns:
A Pandas DataFrame with columns
['frame_id','class_id','score','x1','y1','x2','y2']
"""
df_list = []
for i, p in enumerate(predictions):
boxes = p['boxes'].detach().cpu().tolist()
labels = p['labels'].detach().cpu().tolist()
scores = p['scores'].detach().cpu().tolist()
df = pd.DataFrame(boxes, columns=['x1','y1','x2','y2'])
df['class_id'] = labels
df['score'] = scores
df['frame_id'] = i
df_list.append(df)
df_detect = pd.concat(df_list, axis=0)
df_detect = df_detect[['frame_id','class_id','score','x1','y1','x2','y2']]
return df_detect
@staticmethod
def calc_iou(boxA, boxB):
# https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
# compute the area of intersection rectangle
interArea = max(0, xB - xA) * max(0, yB - yA)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
boxBArea = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the intersection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
@staticmethod
def evaluate_detections_iou(gt, det, iou_threshold):
"""Evaluate and obtain FN and FP records between detection and annotations
Args:
df_detect (pandas.DataFrame): Detected boxes in a Pandas Dataframe
with columns ['frame_id','class_id','score','x1','y1','x2','y2']
df_annot (pandas.DataFrame): Known/annotation boxes in a Pandas
Dataframe with columns ['frame_id','class_id','x1','y1','x2','y2']
Returns:
result (pandas.DataFrame): Count of total number of objects in gt and det, and tp, fn, fp
with columns ['num_object_gt', 'num_object_det', 'tp', 'fn', 'fp']
df_fn (pandas.DataFrame): False negative records in a Pandas Dataframe
with columns ['frame_id','class_id','x1','y1','x2','y2']
df_fp (pandas.DataFrame): False positive records in a Pandas Dataframe
with columns ['frame_id','class_id', 'score', 'x1','y1','x2','y2']
"""
if (gt is not None) and (det is not None):
matched = []
for g in range(gt.shape[0]):
count = 0
for d in range(det.shape[0]):
iou = ObjectDetector.calc_iou(np.array(gt.iloc[g,2:]), np.array(det.iloc[d,3:]))
if (iou > iou_threshold):
if (count == 0):
max_conf = det.iloc[d,2]
temp = [g,d,iou, det.iloc[d,2]]
count +=1
elif (count > 0):
print("Multiple detections found, keep only with highest confidence")
if (max_conf < det.iloc[d,2]):
max_conf = det.iloc[d,2]
temp = [g,d,iou, det.iloc[d,2]]
count +=1
if (count != 0):
matched.append(temp)
df_tp = pd.DataFrame(matched, columns = ['gt_index', 'det_index', 'iou', 'det_conf'])
# To qualitatively find detection error, output fn and fp boxes. just visualize them on the frame
# Get unmatched gt - these are FNs
df_fn = []
num_fn = 0
for i in range(gt.shape[0]):
if i not in df_tp['gt_index'].tolist():
df_fn.append(gt.iloc[i,:])
num_fn +=1
if num_fn > 0:
df_fn = pd.DataFrame(data=df_fn)
df_fn.columns = ['frame_id','class_id','x1','y1','x2','y2']
else:
df_fn = None
# Get unmatched det - these are FPs
df_fp = []
num_fp = 0
for i in range(det.shape[0]):
if i not in df_tp['det_index'].tolist():
df_fp.append(det.iloc[i,:])
num_fp +=1
if num_fp > 0:
df_fp = pd.DataFrame(data=df_fp)
df_fp.columns = ['frame_id','class_id', 'score', 'x1','y1','x2','y2']
else:
# print("num_fp = 0 in frame_id {}".format(gt.iloc[0,0]))
df_fp = None
# To quantify detection error, output number of helmets in gt, number of helmets in det, tp, fn, fp
frame_id = gt.iloc[0,0]
tp = len(df_tp['gt_index'].unique())
result = []
result.append([frame_id,
gt.shape[0],
det.shape[0],
tp,
num_fn,
num_fp])
result = pd.DataFrame(data=result, columns = ['frame_id', 'num_object_gt', 'num_object_det', 'tp', 'fn', 'fp'])
else:
result = None
df_fn = None
df_fp = None
return result, df_fn, df_fp
@staticmethod
def find_frames_high_fn_fp(eval_det, fn_thres, fp_thres):
""" Find frames with high fn and fp, fn >= fn_thres and fp >= fp_thres
Arg:
eval_det: Detection evaluation matrix for whole play
fn_thres: Include frames where fn is greater than or equal to this value
fp_thres: Include frames where fp is greater than or equal to this value
Return:
frame_list: List of frames with high fn and fp
"""
frame_list = eval_det[(eval_det['fn'] >= fn_thres) & (eval_det['fp'] >= fp_thres)]['frame_id'].tolist()
return frame_list
@staticmethod
def run_detection_video(video_in, model_path, full_video=True, subset_video=60, conf_thres=0.9):
""" Run detection on video
Args:
video_in: Input video path
model_path: Location of the pretrained model.pt
full_video: Bool to indicate whether to run the whole video, default = True
subset_video: Number of frames to run detection on
conf_thres = Only consider detections with score higher than conf_thres, default = 0.9
Returns:
Predicted detection for all the frames in a video
df_predictions (pandas.DataFrame): prediction of detected object for all frames
with columns ['frame_id', 'class_id', 'score', 'x1', 'y1', 'x2', 'y2']
"""
# Capture the input video
vid = cv2.VideoCapture(video_in)
# Get video title
vid_title = os.path.splitext(os.path.basename(video_in))[0]
# Get total number of frames
num_frames = vid.get(cv2.CAP_PROP_FRAME_COUNT)
# load model
num_classes = 2
model = ObjectDetector.load_custom_model(model_path=model_path, num_classes=num_classes)
# if running for the whole video, then change the size of subset_video with total number of frames
if full_video:
subset_video = int(num_frames)
df_predictions = [] # predictions for whole video
for i in range(subset_video): #383
ret, frame = vid.read()
print("Processing frame#: {} running detection for videos".format(i))
# Get detection for this frame
list_frame = [frame]
dataset_frame = FramesDataset(list_frame)
prediction = ObjectDetector.run_detection(dataset_frame, model)
df_prediction = ObjectDetector.to_dataframe_highconf(prediction, conf_thres, i)
df_predictions.append(df_prediction)
# Concatenate predictions for all frames of the video
df_predictions = pd.concat(df_predictions)
return df_predictions
@staticmethod
def run_detection_frames(frames, model_path, batch_size=4, conf_thres=0.9, start_frame=0, end_frame=-1):
""" Run detection on list of frames
Args:
frames: List of frames between start_frame and end_frame of a full play video
model_path: Location of the pretrained model.pt
batch_size (int): Size of inference minibatch --> not sure we need this
conf_thres: Only consider detections with score higher than conf_thres, default = 0.9
start_frame: First frame number to output. Default is 0.
end_frame: Last frame number to output. If less than 1 then take all frames
Returns:
Predicted detection for all the frames between start_frame and end_frame of a full play video
df_predictions (pandas.DataFrame): prediction of detected object for all frames
with columns ['frame_id', 'class_id', 'score', 'x1', 'y1', 'x2', 'y2']
Todo:
Figure out how to reduce confusion around start_frame/end_frame var collision with utils.frames_from_video()
"""
if end_frame >= 1:
assert start_frame <= end_frame
if end_frame < 0:
end_frame = start_frame + len(frames) -1
# load model
num_classes = 2
model = ObjectDetector.load_custom_model(model_path=model_path, num_classes=num_classes)
df_predictions = [] # predictions for all frames
count = 0
for i in range(start_frame, end_frame):
# Get detection for this frame
dataset_frame = FramesDataset([frames[count]])
prediction = ObjectDetector.run_detection(dataset_frame, model)
df_prediction = ObjectDetector.to_dataframe_highconf(prediction, conf_thres, i)
df_predictions.append(df_prediction)
count+=1
# dataset = FramesDataset(frames)
# batcher = DataLoader(dataset, batch_size=batch_size, shuffle=False)
# for batch in batcher:
# prediction = ObjectDetector.run_detection(batch, model)
# df_prediction = ObjectDetector.to_dataframe_highconf(prediction, conf_thres, batch)
# df_predictions.append(df_prediction)
# Concatenate predictions for all frames of the video
df_predictions = pd.concat(df_predictions)
return df_predictions
@staticmethod
def get_gt_frame(frame_id, cur_boxes):
"""Get ground truth annotations on the frames
Args:
frame_id: Frame id
cur_boxes: Current annotation boxes "left", "width", "top", "height"
Returns:
box_ret: ground truth boxes in a Pandas
Dataframe with columns ['frame_id','class_id','x1','y1','x2','y2']
"""
box_out = []
for box in cur_boxes:
box_out.append([frame_id, 1, box[0],box[2],box[0]+box[1], box[2]+box[3]])
# Return gt dataframe
box_ret = pd.DataFrame(data = box_out, columns = ['frame_id','class_id','x1','y1','x2','y2'])
return box_ret
@staticmethod
def run_detection_eval_video(video_in, gtfile_name, model_path, full_video=True, subset_video=60, conf_thres=0.9, iou_threshold = 0.5):
""" Run detection on video
Args:
video_in: Input video path
gtfile_name: Ground Truth annotation json file name
model_path: Location of the pretrained model.pt
full_video: Bool to indicate whether to run the whole video, default = True
subset_video: Number of frames to run detection on
conf_thres = Only consider detections with score higher than conf_thres, default = 0.9
iou_threshold = Match detection with ground trurh if iou is higher than iou_threshold, default = 0.5
Returns:
Predicted detection for all the frames in a video, evaluation for detection, a dataframe with bounding boxes for
false negatives and false positives
df_predictions (pandas.DataFrame): prediction of detected object for all frames
with columns ['frame_id', 'class_id', 'score', 'x1', 'y1', 'x2', 'y2']
eval_results (pandas.DataFrame): Count of total number of objects in gt and det, and tp, fn, fp for all frames
with columns ['frame_id', 'num_object_gt', 'num_object_det', 'tp', 'fn', 'fp']
fns (pandas.DataFrame): False negative records in a Pandas Dataframe for all frames
with columns ['frame_id','class_id','x1','y1','x2','y2'], return empty dataframe if no false negatives
fps (pandas.DataFrame): False positive records in a Pandas Dataframe for all frames
with columns ['frame_id','class_id', 'score', 'x1','y1','x2','y2'], return empty dataframe if no false positives
"""
# Capture the input video
vid = cv2.VideoCapture(video_in)
# Get video title
vid_title = os.path.splitext(os.path.basename(video_in))[0]
# Get total number of frames
num_frames = vid.get(cv2.CAP_PROP_FRAME_COUNT)
print("********** Num of frames", num_frames)
# load model
num_classes = 2
model = ObjectDetector.load_custom_model(model_path=model_path, num_classes=num_classes)
print("Pretrained model loaded")
# Get GT annotations
gt_labels = pd.read_csv('/home/ec2-user/SageMaker/0Artifact/helmet_detection/input/train_labels.csv')#.fillna(0)
video = os.path.basename(video_in)
print("Processing video: ",video)
labels = gt_labels[gt_labels['video']==video]
# if running for the whole video, then change the size of subset_video with total number of frames
if full_video:
subset_video = int(num_frames)
# frames = []
df_predictions = [] # predictions for whole video
eval_results = [] # detection evaluations for the whole video
fns = [] # false negative detections for the whole video
fps = [] # false positive detections for the whole video
for i in range(subset_video):
ret, frame = vid.read()
print("Processing frame#: {} running detection and evaluation for videos".format(i+1))
# Get detection for this frame
list_frame = [frame]
dataset_frame = FramesDataset(list_frame)
prediction = ObjectDetector.run_detection(dataset_frame, model)
df_prediction = ObjectDetector.to_dataframe_highconf(prediction, conf_thres, i)
df_predictions.append(df_prediction)
# Get label for this frame
cur_label = labels[labels['frame']==i+1] # get this frame's record
cur_boxes = cur_label[['left','width','top','height']].values
gt = ObjectDetector.get_gt_frame(i+1, cur_boxes)
# Evaluate detection for this frame
eval_result, fn, fp = ObjectDetector.evaluate_detections_iou(gt, df_prediction, iou_threshold)
eval_results.append(eval_result)
if fn is not None:
fns.append(fn)
if fp is not None:
fps.append(fp)
# Concatenate predictions, evaluation results, fns and fps for all frames of the video
df_predictions = pd.concat(df_predictions)
eval_results = pd.concat(eval_results)
# Concatenate fns if not empty, otherwise create an empty dataframe
if not fns:
fns = pd.DataFrame()
else:
fns = | pd.concat(fns) | pandas.concat |
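# A standalone check of the IoU formula used by ObjectDetector.calc_iou above;
# the two [x1, y1, x2, y2] boxes are made-up values.
def iou(boxA, boxB):
    xA, yA = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    xB, yB = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    inter = max(0, xB - xA) * max(0, yB - yA)
    areaA = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    areaB = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    return inter / float(areaA + areaB - inter)

print(iou([0, 0, 10, 10], [5, 5, 15, 15]))  # 25 / 175 = 0.142857...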
# -*- coding: utf-8 -*-
"""
Use this URL for a Google Colab Demo of this class and its usage:
https://colab.research.google.com/drive/154_2tvDn_36pZzU_XkSv9Xvd3KjQCw1U
"""
from datetime import timedelta, datetime, timezone
import sys, os, time, random
import pandas as pd
import json
import csv
import sqlite3
from sqlite3 import Error
import ccxt
import yfinance as yf
class Data():
"""
Class Wraps CCXT and Yahoo Finance Data Fetching Functions
"""
timedeltas_timeframe_suffixes = {
"s": timedelta(seconds=1),
"m": timedelta(minutes=1),
"h": timedelta(hours=1),
"d": timedelta(days=1),
"w": timedelta(days=7),
"M": timedelta(days=31),
"Y": timedelta(weeks=52), # option for fetch trades
"y": timedelta(weeks=52) # lowercase alias
}
class CCXT():
"""
The majority of code credit goes to:
https://github.com/Celeborn2BeAlive/cryptobigbro
exchange_id: Any exchange id available thru CCXT
https://github.com/ccxt/ccxt/wiki/Manual#exchanges
symbol: A slash is used for all symbols except on BitMEX Futures.
eg. XRPH20 has no slash,
but BTC/USD and ETH/USD are how they identify the USD pairs.
timeframe: Any timeframe available on the chosen exchange.
candle_amount: Use 'all' to get FULL candle history.
Default is 500.
trades_amount: Use 'all' to get FULL trade history.
Default is '10m' aka 10 mins.
save_path: Use if you want to save data as .csv file or SQLite DB.
save_format: 'csv' or 'sqlite' are the database options.
TT_Format: True would set columns with a prefix using the base symbol
eg. BTC:open, BTC:close, BTC:volume
Example Usage:
from tensortrade.utils.ccxt_data_fetcher import CCXT_Data
# Fetch Trades
trades = CCXT_Data.fetch_trades(
exchange = 'bitmex',
symbol = 'BTC/USD',
trades_amount = '10m', ## Get 10 minutes worth of trades
save_path = '/content/drive/My Drive/',
save_format = 'csv'
)
# Fetch Candles
ohlcv = CCXT_Data.fetch_candles(
exchange = 'binance',
symbol = 'BTC/USDT',
timeframe = '1d',
candle_amount = '1000', ## Get 1000 1 Day candles
save_path = '/content/drive/My Drive/Crypto_SQLite_DBs/',
save_format = 'sqlite'
)
"""
@classmethod
def fetch_candles(cls,
exchange: str = 'binance',
symbol: str = 'BTC/USDT',
timeframe: str = '1d',
candle_amount: int = 1000,
save_path = '',
save_format: str = 'csv',
limit: int = 1000,
TT_Format=False):
"""
Fetch OHLCV aka Candle Data using CCXT
Able to fetch full available candle history
Options to save to CSV or SQLite DB files
"""
mk_path = ''
path = save_path
path_to_db_file = ''
csv = False
sqlite = False
if path:
if save_format.lower() == 'csv':
csv = True
if save_format.lower() == 'sqlite':
sqlite = True
exchange_id = exchange.lower()
symbol = symbol.upper()
# Init CCXT exchange object
ccxt_exchange = getattr(ccxt, exchange_id)({
'enableRateLimit': True
})
ccxt_exchange.load_markets() # Requisite CCXT step
all_symbols = [symbol for symbol in ccxt_exchange.symbols] # Get all valid symbols on exchange
all_timeframes = [tf for tf in ccxt_exchange.timeframes] # Get all valid timeframes
# Skip to next symbol if not found on exchange
if symbol not in all_symbols:
print("[ERROR] Unsupported symbol {} for exchange {}.".format(symbol, exchange_id))
return None
# Skip if timeframe not available on this exchange
if timeframe not in all_timeframes:
print("[ERROR] Unsupported timeframe {} for {}.".format(timeframe, exchange_id))
return None
print("-- Fetching {} candles for {}".format(timeframe, symbol))
# Grab most recent timestamp if data exists already
if type(candle_amount) != str:
if candle_amount > 0:
if timeframe.endswith('M'):
_ = timeframe.split('M')
c = int(_[0])
# Special case for month because it has no fixed timedelta
since = datetime.utcnow() - (c * candle_amount * Data._timedelta(timeframe))
since = datetime(since.year, since.month, 1, tzinfo=timezone.utc)
else:
since = datetime.utcnow() - (candle_amount * Data._timedelta(timeframe))
elif candle_amount.lower() == 'all':
since = datetime(1970, 1, 1, tzinfo=timezone.utc)
else:
if timeframe.endswith('M'):
since = datetime(1970, 1, 1, tzinfo=timezone.utc)
else:
since = datetime.utcnow() - (500 * Data._timedelta(timeframe))
since = Data._earliest_datetime(since) # sanitize if date is earlier than 1970
main_path = ccxt_exchange.id + '/' + symbol.replace('/','_') + '_' + timeframe
if csv:
path_to_db_file = path + 'csv/' + main_path + '.csv'
mk_path = path + 'csv/'
path = path + 'csv/' + ccxt_exchange.id + '/'
elif sqlite:
path_to_db_file = path + 'sqlite/' + main_path + '.sqlite'
mk_path = path + 'sqlite/'
path = path + 'sqlite/' + ccxt_exchange.id + '/'
df = pd.DataFrame()
df_db = pd.DataFrame() # If DB File exists, load it to grab most recent candle timestamp
# Fetch candles till done
while True:
# Could make this more efficient by saving the timestamp and loading it if it exists
if path and os.path.exists(path_to_db_file):
#print("\t\t-- Loading existing history from file {} to get next timestamp.".format(path_to_db_file))
if csv:
df_db = | pd.read_csv(path_to_db_file) | pandas.read_csv |
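# A small sketch of how a CCXT-style timeframe string such as "15m" or "4h"
# maps onto a timedelta, mirroring Data.timedeltas_timeframe_suffixes above.
# Data._timedelta itself lives elsewhere in the class, so this helper is
# illustrative only.
from datetime import timedelta

suffixes = {"m": timedelta(minutes=1), "h": timedelta(hours=1), "d": timedelta(days=1)}

def timeframe_to_timedelta(tf):
    amount, unit = int(tf[:-1]), tf[-1]
    return amount * suffixes[unit]

print(timeframe_to_timedelta("15m"))           # 0:15:00
print(500 * timeframe_to_timedelta("1h"))      # 20 days, 20:00:00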
"""
Create a dashboard using plotly dash open source version. It will generate the same dashboard
which is generated using voila notebook
Pre-requisite
- dash
- dash_flexbox_grid
- dash-leaflet
- dash_extensions
- jsbeautifier
"""
import dash_flexbox_grid as dfx
import dash_html_components as html
import dash_core_components as dcc
import dash_leaflet as dl
from dash import Dash
from dash.dependencies import Output, Input
from dash.exceptions import PreventUpdate
from dash_extensions.javascript import assign
import plotly.express as px
import pandas as pd
from dashboard.evdata import EVStation, TrafficMessage, get_partition_list, nearest_messages, empty_geojson
# initialize EV and traffic data
ev = EVStation()
traffic = TrafficMessage()
# Geojson layer style.
point_to_layer_traffic = assign("""function(feature, latlng, context) {
circle = L.circle(latlng, radius=3);
circle.setStyle({color: '#424344'});
return circle;}""")
# Geojson layer style.
point_to_layer_ev = assign("""function(feature, latlng, context) {
circle = L.circle(latlng, radius=20);
circle.setStyle({color: '#c20326'});
return circle;}""")
app = Dash('')
app.scripts.config.serve_locally = True
def get_info(feature=None):
"""
Information panel for charging station
:param feature:
:return:
"""
header = [html.H3("EV Charging Station Details"), html.P()]
if not feature:
return header + [html.P("Hoover over a Charging Station")]
return header + [html.B("Station Name:{}".format(feature["properties"]["name"])), html.Br(), html.Br(),
"Language Code: {}".format(feature['properties']['description']), html.Br(), html.Br(),
"Operating Days:{}".format(feature['properties']['days']), html.Br(),
]
# HTML element for information
info = html.Div(children=get_info(), id="info", className="info",
style={"position": "absolute", "top": "10px", "right": "10px", "z-index": "1000"})
# Layout for map, and chart elements
app.layout = dfx.Grid(id='grid', fluid=True, children=[
dfx.Row(children=[html.H3("EV Charging Station Demo Dashboard"), html.P()]),
dfx.Row(children=[
dfx.Col(xs=12, lg=24, children=[
html.Div([
# Setup a map with the edit control.
dl.Map(center=[52.5, 13.3], zoom=14, children=[dl.TileLayer(),
dl.GeoJSON(id="geo_traffic", options=dict(
pointToLayer=point_to_layer_traffic)),
dl.GeoJSON(id="geo_ev",
options=dict(pointToLayer=point_to_layer_ev)),
info,
dl.FeatureGroup([
dl.EditControl(id="edit_control")]),
],
style={'width': '100%', 'height': '50vh', 'margin': "auto", "display": "inline-block"},
id="map"),
])
])
]),
dfx.Row(children=[
dfx.Col(xs=6, lg=6, children=html.P("Sum of Traffic Flow Messages/EV Station",
style={'text-align': 'center', 'font-size': '25px'})),
dfx.Col(xs=6, lg=6,
children=html.P("EV Station Companies", style={'text-align': 'center', 'font-size': '25px'}))
]),
dfx.Row(children=[
dfx.Col(xs=6, lg=6, children=html.Div([dcc.Graph(id="bar-chart")])),
dfx.Col(xs=6, lg=6, children=html.Div([dcc.Graph(id="pie-chart")]))
])
])
@app.callback(Output("info", "children"), [Input("geo_ev", "hover_feature")])
def info_hover(feature):
"""
Generate Information Panel
:param feature:
:return:
"""
return get_info(feature)
@app.callback([Output("geo_traffic", "data"),
Output("geo_ev", "data"),
Output("pie-chart", "figure"),
Output("bar-chart", "figure")],
[Input("edit_control", "geojson"), Input("edit_control", "action")])
def ev_station(geo, action):
"""
This is called after drawing a rectangle on the map; geo is the GeoJSON of the drawn control on the map
and action is the event generated, e.g. drawstart of rectangle.
:param geo:
:param action:
:return:
"""
# clear map id delete action is taken
if action and action['type'] == 'draw:deletestop':
return clear_map()
# if geojson is null or empty features, do not update
elif not geo or len(geo['features']) == 0 or not action:
raise PreventUpdate
# if layer_type is not present, , do not update
elif action and not 'layer_type' in action:
raise PreventUpdate
# if rectangle drawn on the map then compute and draw data on the map and update charts
elif action and action['layer_type'] == 'rectangle' and action['type'] == 'draw:drawstop':
# compute coordinates of the rectangle
x1 = geo['features'][0]['properties']['_bounds'][0]['lat']
y1 = geo['features'][0]['properties']['_bounds'][0]['lng']
x2 = geo['features'][0]['properties']['_bounds'][1]['lat']
y2 = geo['features'][0]['properties']['_bounds'][1]['lng']
p_list = get_partition_list(y1, x1, y2, x2)
geo_ev = ev.get_geojson(p_list, y1, x1, y2, x2)
pie_fig = pie_chart(ev.data)
geo_tr = traffic.get_geojson(p_list, y1, x1, y2, x2)
bar_fig = bar_chart(ev.data, traffic.data)
return geo_tr, geo_ev, pie_fig, bar_fig
else:
raise PreventUpdate
def clear_map():
"""
Clear map
:return:
"""
df_empty = | pd.DataFrame({'name': [], 'values': []}) | pandas.DataFrame |
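# A minimal illustration of how ev_station above pulls the rectangle corners
# out of the edit control's GeoJSON; the payload below is a made-up example of
# dash-leaflet's "_bounds" structure.
geo = {"features": [{"properties": {"_bounds": [
    {"lat": 52.49, "lng": 13.28}, {"lat": 52.52, "lng": 13.35}]}}]}

x1 = geo["features"][0]["properties"]["_bounds"][0]["lat"]
y1 = geo["features"][0]["properties"]["_bounds"][0]["lng"]
x2 = geo["features"][0]["properties"]["_bounds"][1]["lat"]
y2 = geo["features"][0]["properties"]["_bounds"][1]["lng"]
print(x1, y1, x2, y2)  # 52.49 13.28 52.52 13.35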
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import pandas as pd
from scipy.stats import chisquare
def _chi2(bad_rates: List[Dict], overall_rate: float) -> float:
f_obs = [_bin["bad"] for _bin in bad_rates]
f_exp = [_bin["total"] * overall_rate for _bin in bad_rates]
chi2 = chisquare(f_obs=f_obs, f_exp=f_exp)[0]
return chi2
def _check_diff_woe(bad_rates: List[Dict], diff_woe_threshold: float) -> Union[None, int]:
woe_delta: np.ndarray = np.abs(np.diff([bad_rate["woe"] for bad_rate in bad_rates]))
min_diff_woe = min(sorted(list(set(woe_delta))))
if min_diff_woe < diff_woe_threshold:
return list(woe_delta).index(min_diff_woe)
else:
return None
def _mono_flags(bad_rates: List[Dict]) -> bool:
bad_rate_diffs = np.diff([bad_rate["bad_rate"] for bad_rate in bad_rates])
positive_mono_diff = np.all(bad_rate_diffs > 0)
negative_mono_diff = np.all(bad_rate_diffs < 0)
return True in [positive_mono_diff, negative_mono_diff]
def _find_index_of_diff_flag(bad_rates: List[Dict]) -> int:
bad_rate_diffs = np.diff([bad_rate["bad_rate"] for bad_rate in bad_rates])
idx = list(bad_rate_diffs > 0).index(pd.Series(bad_rate_diffs > 0).value_counts().sort_values().index.tolist()[0])
return idx
def _merge_bins_chi(x: np.ndarray, y: np.ndarray, bad_rates: List[Dict], bins: List):
idx = _find_index_of_diff_flag(bad_rates)
if idx == 0:
del bins[1]
elif idx == len(bad_rates) - 2:
del bins[len(bins) - 2]
else:
temp_bins = copy.deepcopy(bins)
del temp_bins[idx + 1]
temp_bad_rates, temp_overall_rate = bin_bad_rate(x, y, temp_bins)
chi_1 = _chi2(temp_bad_rates, temp_overall_rate)
del temp_bins
temp_bins = copy.deepcopy(bins)
del temp_bins[idx + 2]
temp_bad_rates, temp_overall_rate = bin_bad_rate(x, y, temp_bins)
chi_2 = _chi2(temp_bad_rates, temp_overall_rate)
if chi_1 < chi_2:
del bins[idx + 1]
else:
del bins[idx + 2]
bad_rates, _ = bin_bad_rate(x, y, bins)
return bad_rates, bins
def _merge_bins_min_pct(
x: np.ndarray, y: np.ndarray, bad_rates: List[Dict], bins: List, cat: bool = False
):
idx = [
pct for pct in [bad_rates[i]["pct"] for i in range(len(bad_rates))]
].index(min([bad_rate["pct"] for bad_rate in bad_rates]))
if cat:
if idx == 0:
bins[idx + 1] += bins[idx]
elif idx == len(bad_rates) - 1:
bins[idx - 1] += bins[idx]
else:
if bad_rates[idx - 1]["pct"] < bad_rates[idx + 1]["pct"]:
bins[idx - 1] += bins[idx]
else:
bins[idx + 1] += bins[idx]
del bins[idx]
else:
if idx == 0:
del bins[1]
elif idx == len(bad_rates) - 1:
del bins[len(bins) - 2]
else:
if bad_rates[idx - 1]["pct"] < bad_rates[idx + 1]["pct"]:
del bins[idx]
else:
del bins[idx + 1]
bad_rates, _ = bin_bad_rate(x, y, bins, cat=cat)
if cat:
bins = [bad_rate["bin"] for bad_rate in bad_rates]
return bad_rates, bins
def bin_bad_rate(
x: np.ndarray, y: np.ndarray, bins: List, cat: bool = False
) -> tuple[list[dict[str, Union[Union[list, int, float], Any]]], Union[Optional[float], Any]]:
bad_rates = []
if cat:
max_idx = len(bins)
else:
max_idx = len(bins) - 1
for i in range(max_idx):
if cat:
value = bins[i]
else:
value = [bins[i], bins[i + 1]]
x_not_na = x[~pd.isna(x)]
y_not_na = y[~pd.isna(x)]
if cat:
x_in = x_not_na[pd.Series(x_not_na).isin(value)]
else:
x_in = x_not_na[
np.where((x_not_na >= np.min(value)) & (x_not_na < np.max(value)))
]
total = len(x_in)
all_bad = y[~pd.isna(x)].sum()
all_good = len(y[~pd.isna(x)]) - all_bad
bad = y_not_na[np.isin(x_not_na, x_in)].sum()
pct = np.sum(np.isin(x_not_na, x_in)) * 1.0 / len(x)
bad_rate = bad / total
good = total - bad
good_rate = good / total
if good != 0 and bad != 0:
woe = np.log((good / all_good) / (bad / all_bad))
else:
woe = np.log((good + 0.5 / all_good) / (bad + 0.5 / all_bad))
iv = (good_rate - bad_rate) * woe
stats = {
"bin": value,
"total": total,
"bad": bad,
"pct": pct,
"bad_rate": bad_rate,
"woe": woe,
"iv": iv,
}
bad_rates.append(stats)
if cat:
bad_rates.sort(key=lambda _x: _x["bad_rate"])
overall_rate = None
if not cat:
bad = sum([bad_rate["bad"] for bad_rate in bad_rates])
total = sum([bad_rate["total"] for bad_rate in bad_rates])
overall_rate = bad * 1.0 / total
return bad_rates, overall_rate
def cat_binning(
x: np.ndarray,
y: np.ndarray,
min_pct_group: float,
max_bins: int,
diff_woe_threshold: float,
) -> tuple[list[dict[str, Union[Union[list, int, float], Any]]], Optional[str]]:
missing_bin = None
try:
x = x.astype(float)
data_type = "float"
except ValueError:
x = x.astype(object)
data_type = "object"
bins = list([bin] for bin in np.unique(x[~pd.isna(x)]))
if len(bins) > max_bins:
bad_rates_dict = dict(
sorted(
{bins[i][0]: y[np.isin(x, bins[i])].sum() / len(y[np.isin(x, bins[i])]) for i in
range(len(bins))}.items(), key=lambda item: item[1]
)
)
bad_rate_list = [bad_rates_dict[i] for i in bad_rates_dict]
q_list = [0.0]
for quantile in range(1, max_bins):
q_list.append(
np.nanquantile(np.array(bad_rate_list), quantile / max_bins, axis=0)
)
q_list.append(1)
q_list = list(sorted(set(q_list)))
new_bins = [copy.deepcopy([list(bad_rates_dict.keys())[0]])]
start = 1
for i in range(len(q_list) - 1):
for n in range(start, len(list(bad_rates_dict.keys()))):
if bad_rate_list[n] >= q_list[i + 1]:
break
elif (bad_rate_list[n] >= q_list[i]) & (
bad_rate_list[n] < q_list[i + 1]
):
try:
new_bins[i] += [list(bad_rates_dict.keys())[n]]
start += 1
except IndexError:
new_bins.append([])
new_bins[i] += [list(bad_rates_dict.keys())[n]]
start += 1
bad_rates, _ = bin_bad_rate(x, y, new_bins, cat=True)
bins = [bad_rate["bin"] for bad_rate in bad_rates]
else:
bad_rates, _ = bin_bad_rate(x, y, bins, cat=True)
if len(y[pd.isna(x)]) > 0:
if len(bins) < 2:
bins.append([])
if data_type == "object":
bins[1] += ["Missing"]
x[ | pd.isna(x) | pandas.isna |
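# A worked numeric example of the per-bin WoE / IV formulas used in
# bin_bad_rate above (the counts are hypothetical):
#   woe = ln((good / all_good) / (bad / all_bad))
#   iv  = (good_rate - bad_rate) * woe
import numpy as np

total, bad = 200, 30
all_good, all_bad = 900, 100
good = total - bad                                   # 170
bad_rate, good_rate = bad / total, good / total      # 0.15, 0.85
woe = np.log((good / all_good) / (bad / all_bad))    # ln(0.1889 / 0.30) ~ -0.463
iv = (good_rate - bad_rate) * woe                    # 0.70 * -0.463 ~ -0.324
print(round(woe, 3), round(iv, 3))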
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 11 16:14:00 2019
@author: smithd24
"""
import math
import time
from lsclib import xls_read_interpolate as xlsread
from lsclib import lsc_classes as lsccls
import cProfile, pstats
from lsclib import external_equations as eqn
import pandas as pd
def wedge(trials, Einc = 451.313, light_form = 'direct',
results = 'single', theta_o = .000001, phi_o = .000001):
"""Set up geometry to run trials for a wedge-shaped LSC. Specify
vertices that make up the LSC. Each vertice will belong to a boundary and
each boundary will belong to a volume. Using the LSC classes that have
been defined, attribute characteristics to each boundary and volume.
Parameters
----------
trials : int
Indicates the number of bundles that will be traced by the program.
Einc : float
Indicates incident irradiance on the LSC. Default is 451.313 based upon
the irradiance of the Newport Solar Simulator used for experiments.
light_form : string
Determines distribution of light entering the LSC.
'direct' - light enters at a fixed angle
'diffuse' - light enters with a Lambertian distribution
'ground' - light enters with a modified Lambertian distribution due to
the relative angle up from the ground
results : string
Determines if one result or if a matrix of incidence angle
combinations is desired
theta_o : float, optional
Initial polar incidence angle. Default is approximately zero (1e-6). This
is the polar angle relative to the LSC normal.
phi_o : float, optional
Initial azimuthal incidence angle. Default is approximately zero (1e-6).
This is the azimuth angle relative to the LSC normal.
Returns
-------
LSC : object
LSC object is returned after program has run. The LSC object will have
the optical efficiency, short circuit current (for a cell within an
LSC and a bare solar cell), and spectral mismatch factor available as
attributes among a variety of other simulation results.
Notes
-----
Having this written as one large script might not be ideal. There is a
wide variety of inputs, so, instead of passing them all here, they could
be inputs to the particular functions that make up "lsc_main". This would
also demonstrate that a user is not limited to the single configuration
detailed in the main script shown here.
Phosphor particle should be added before iterating through incidence angle
combinations.
The starting volume/boundary process should be improved.
The errorcount variables should be replaced by a for loop that runs automatically.
"""
# Initialize wedge-shaped geometry. Coordinates of individual vertices are
# determined before they are assigned to individual boundaries.
Height = .007
Film_height = .001
Short_side_pos = 0 # distance up from zero to the bottom point of the
# short side of a wedge-shaped LSC
Top_length = .05
Mirror_gap_length = .000001
W = .022
precision = 16
hypotenuse = math.sqrt(Short_side_pos**2 + Top_length**2)
angle = math.acos(Top_length/hypotenuse)
L0 = 0
H0_0 = 0
H0_1 = Film_height
H0_2 = Height
L1 = Mirror_gap_length*math.cos(angle)
H1_0 = Mirror_gap_length*math.sin(angle)
H1_1 = H1_0 + Film_height
L2 = Top_length
H2_0 = Short_side_pos
H2_1 = H2_0 + Film_height
H2_2 = Height
L1 = round(L1, precision)
H1_0 = round(H1_0, precision)
H1_1 = round(H1_1, precision)
H2_1 = round(H2_1, precision)
L = Top_length
H = Height
# read in various excel data tables
[abs_matrix, EQE_pv, IQE_pv, emi_source,
abs_particle, emi_particle] = xlsread.excel_read()
EQE_pv, EQE_pv_max = xlsread.spline(EQE_pv)
IQE_pv, IQE_pv_max = xlsread.spline(IQE_pv)
emi_source, emi_source_max = xlsread.spline(emi_source)
abs_particle, abs_particle_max = xlsread.spline(abs_particle)
emi_particle, emi_particle_max = xlsread.spline(emi_particle)
# establish particle characteristics
wave_len_min = 270 # minimum wavelength that can be absorbed by a particle
wave_len_max = 500 # maximum wavelength that can be absorbed by a particle
qe = 0.75 # quantum efficiency of a particle
poa = .2 # probability of particle absorption (exp. value)
extinction = 4240 # extinction coefficient (42.4 cm^-1)
# establish matrix characteristics
IoR = eqn.IoR_Sellmeier # set index of refraction as constant or eqn
abs_matrix, abs_matrix_max = xlsread.spline(abs_matrix)
wave_len_min_matrix = 229 # minimum wavelength absorbed by matrix
wave_len_max_matrix = 1100 # maximum wavelength absorbed by matrix
# establish solar cell characteristics
wave_len_min_pv = 350 # minimum wavelength absorbed by pv
wave_len_max_pv = 1100 # maximum wavelength absorbed by pv
# if running a combination of many theta_o and phi_o
if light_form == 'direct' and results == 'matrix':
data = {'': [0.001, 15, 30, 45, 60, 75],
0: [0, 0, 0, 0, 0, 0],
15: [0, 0, 0, 0, 0, 0],
30: [0, 0, 0, 0, 0, 0],
45: [0, 0, 0, 0, 0, 0],
60: [0, 0, 0, 0, 0, 0],
75: [0, 0, 0, 0, 0, 0],
89.999: [0, 0, 0, 0, 0, 0]}
# Convert the dictionary into DataFrame
df = pd.DataFrame(data)
df.set_index('', inplace = True)
theta_loop_count = len(df.index)
phi_loop_count = len(df.columns)
# if expecting just one combination of inputs
if results == 'single':
theta_loop_count = 1
phi_loop_count = 1
for j in range(phi_loop_count):
for i in range(theta_loop_count):
start_time = time.time()
lsc = lsccls.LSC() # initialize LSC class
# add phosphor particle
particle = lsccls.Particle(poa, extinction, wave_len_min,
wave_len_max, qe, abs_particle,
emi_particle, emi_particle_max)
# define dimensions/characteristics of Volume 0 - mirror gap
# input boundaries by setting boundary points of each
bdy0a = [[0, L0, H0_0], [0, L1, H1_0], [W, L1, H1_0], [W, L0, H0_0]]
bdy0b = [[0, L1, H1_0], [0, L1, H1_1], [W, L1, H1_1], [W, L1, H1_0]]
bdy0c = [[0, L0, H0_1], [0, L1, H1_1], [W, L1, H1_1], [W, L0, H0_1]]
bdy0d = [[0, L0, H0_0], [0, L0, H0_1], [W, L0, H0_1], [W, L0, H0_0]]
bdy0e = [[0, L0, H0_0], [0, L1, H1_0], [0, L1, H1_1], [0, L0, H0_1]]
bdy0f = [[W, L0, H0_0], [W, L1, H1_0], [W, L1, H1_1], [W, L0, H0_1]]
bdys0 = [bdy0a, bdy0b, bdy0c, bdy0d, bdy0e, bdy0f]
lsc.vol_list.append(lsccls.AbsorbingVolume(bdys0, 0, lsc, IoR,
abs_matrix,
wave_len_min_matrix,
wave_len_max_matrix))
# add bottom surface
lsc[0].bdy_list.append(lsccls.OpaqueBoundary(bdys0[0], lsc[0],
'specular', .05))
# add right interface with film
lsc[0].bdy_list.append(lsccls.TransparentBoundary(bdys0[1],
lsc[0]))
# add interface with rest of matrix
lsc[0].bdy_list.append(lsccls.TransparentBoundary(bdys0[2],
lsc[0]))
# add left solar cell
lsc[0].bdy_list.append(lsccls.PVBoundary(bdys0[3], lsc[0],
'diffuse', EQE_pv ,
0, wave_len_min_pv,
wave_len_max_pv))
# add front mirror
lsc[0].bdy_list.append(lsccls.OpaqueBoundary(bdys0[4], lsc[0],
'specular', .05))
# add back mirror
lsc[0].bdy_list.append(lsccls.OpaqueBoundary(bdys0[5], lsc[0],
'specular', .05))
# define dimensions/characteristics of Volume 1 - phosphor film
# input boundaries by setting boundary points of each
bdy1a = [[0, L1, H1_0], [0, L2, H2_0], [W, L2, H2_0], [W, L1, H1_0]]
bdy1b = [[0, L2, H2_0], [0, L2, H2_1], [W, L2, H2_1], [W, L2, H2_0]]
bdy1c = [[0, L1, H1_1], [0, L2, H2_1], [W, L2, H2_1], [W, L1, H1_1]]
bdy1d = [[0, L1, H1_0], [0, L1, H1_1], [W, L1, H1_1], [W, L1, H1_0]]
bdy1e = [[0, L1, H1_0], [0, L2, H2_0], [0, L2, H2_1], [0, L1, H1_1]]
bdy1f = [[W, L1, H1_0], [W, L2, H2_0], [W, L2, H2_1], [W, L1, H1_1]]
bdys1 = [bdy1a, bdy1b, bdy1c, bdy1d, bdy1e, bdy1f]
lsc.vol_list.append(lsccls.ParticleVolume(bdys1, 1, lsc, IoR,
abs_matrix, particle,
wave_len_min_matrix,
wave_len_max_matrix))
# add bottom surface
lsc[1].bdy_list.append(lsccls.OpaqueBoundary(bdys1[0], lsc[1],
'specular', .05))
# add right mirror
lsc[1].bdy_list.append(lsccls.OpaqueBoundary(bdys1[1], lsc[1],
'specular', .05))
# add top surface
lsc[1].bdy_list.append(lsccls.TransparentBoundary(bdys1[2], lsc[1]))
# add left interface with mirror gap
lsc[1].bdy_list.append(lsccls.TransparentBoundary(bdys1[3], lsc[1]))
# add front mirror
lsc[1].bdy_list.append(lsccls.OpaqueBoundary(bdys1[4], lsc[1],
'specular', .05))
# add back mirror
lsc[1].bdy_list.append(lsccls.OpaqueBoundary(bdys1[5], lsc[1],
'specular', .05))
# define dimensions/characteristics of Volume 2 - rest of matrix
# input boundaries by setting boundary points of each
bdy2a = [[0, L0, H0_1], [0, L2, H2_1], [W, L2, H2_1], [W, L0, H0_1]]
bdy2b = [[0, L2, H2_1], [0, L2, H2_2], [W, L2, H2_2], [W, L2, H2_1]]
bdy2c = [[0, L0, H0_2], [0, L2, H2_2], [W, L2, H2_2], [W, L0, H0_2]]
bdy2d = [[0, L0, H0_1], [0, L0, H0_2], [W, L0, H0_2], [W, L0, H0_1]]
bdy2e = [[0, L0, H0_1], [0, L2, H2_1], [0, L2, H2_2], [0, L0, H0_2]]
bdy2f = [[W, L0, H0_1], [W, L2, H2_1], [W, L2, H2_2], [W, L0, H0_2]]
bdys2 = [bdy2a, bdy2b, bdy2c, bdy2d, bdy2e, bdy2f]
# define volume
lsc.vol_list.append(lsccls.AbsorbingVolume(bdys2, 2, lsc, IoR,
abs_matrix,
wave_len_min_matrix,
wave_len_max_matrix))
# add interface with mirror gap and phosphor film
lsc[2].bdy_list.append(lsccls.TransparentBoundary(bdys2[0],
lsc[2]))
# add right mirror
lsc[2].bdy_list.append(lsccls.OpaqueBoundary(bdys2[1], lsc[2],
'specular', .05))
# add top surface
lsc[2].bdy_list.append(lsccls.TransparentBoundary(bdys2[2],
lsc[2]))
# add solar cell
lsc[2].bdy_list.append(
lsccls.PVBoundary(bdys2[3], lsc[2], 'diffuse', EQE_pv , 0,
wave_len_min_pv, wave_len_max_pv))
# add front mirror
lsc[2].bdy_list.append(lsccls.OpaqueBoundary(bdys2[4], lsc[2],
'specular', .05))
# add back mirror
lsc[2].bdy_list.append(lsccls.OpaqueBoundary(bdys2[5], lsc[2],
'specular', .05))
# Prepare data inputs for LSC simulation
if light_form == 'direct' and results == 'matrix':
theta_o = df.index[i]
phi_o = df.columns[j]
lsc.matching_pairs()
I = Einc*math.cos(math.radians(theta_o))*(L*W)
theta_o = math.radians(theta_o + 180) # adjust theta to head down
phi_o = math.radians(phi_o + 90) # adjust phi
# Run LSC trials, determining fate of every bundle
starting_vol = len(lsc) - 1
starting_bdy = 2
lsc.main(trials, L, W, H, light_form, theta_o,
phi_o, starting_vol, starting_bdy, I,
emi_source, emi_source_max, particle)
# Process data outputs from all LSC trials
# determine if all bundles in volume 0 are accounted for
errorcount0 = (lsc[0].bundles_absorbed +
lsc[0][0].bundles_absorbed +
lsc[0][1].bundles_reflected +
lsc[0][1].bundles_refracted +
lsc[0][2].bundles_reflected +
lsc[0][2].bundles_refracted +
lsc[0][3].bundles_absorbed +
lsc[0][4].bundles_absorbed +
lsc[0][5].bundles_absorbed)
# determine if all bundles in volume 1 are accounted for
errorcount1 = (lsc[1].bundles_absorbed +
lsc[1][0].bundles_absorbed +
lsc[1][1].bundles_absorbed +
lsc[1][2].bundles_reflected +
lsc[1][2].bundles_refracted +
lsc[1][3].bundles_reflected +
lsc[1][3].bundles_refracted +
lsc[1][4].bundles_absorbed +
lsc[1][5].bundles_absorbed +
particle.bundles_absorbed)
# determine if all bundles in volume 2 are accounted for
errorcount2 = (lsc[2].bundles_absorbed +
lsc[2][0].bundles_reflected +
lsc[2][0].bundles_refracted +
lsc[2][1].bundles_absorbed +
lsc[2][2].bundles_reflected +
lsc[2][2].bundles_refracted +
lsc[2][3].bundles_absorbed +
lsc[2][4].bundles_absorbed +
lsc[2][5].bundles_absorbed)
error = (errorcount0 + errorcount1 + errorcount2)/trials
if error != 1:
print("\nENERGY IS NOT CONSERVED!!!!!")
if results == 'matrix':
df.iloc[i,j] = lsc
if results == 'matrix':
writer = | pd.ExcelWriter('LSC_data.xlsx') | pandas.ExcelWriter |
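# Hedged sketch of the assumed continuation: persist the incidence-angle results
# matrix via pandas.ExcelWriter. The file and sheet names are illustrative
# assumptions, not confirmed by the original script.
def _save_results_matrix(df, path='LSC_data.xlsx'):
    import pandas as pd
    with pd.ExcelWriter(path) as writer:        # context manager saves on exit
        df.to_excel(writer, sheet_name='results')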
# -*- coding: utf-8 -*-
import argparse
import os
import time
import torch
from torch.autograd import Variable as V
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import numpy as np
from pathlib import Path
import pandas as pd
torch.nn.Module.dump_patches = True
# /////////////// Model Setup ///////////////
torch.manual_seed(1)
np.random.seed(1)
torch.cuda.manual_seed(1)
test_bs = 100
models = {
2:"result/False_-1_False_False_cifar10_300_64_2_False_resnet18_297467904.0_0_1000.0_10.0_0.0005.pt",
1:"result/False_-1_False_False_cifar10_300_64_1_False_resnet18_555417600.0_633833_1000.0_10.0_0.0005.pt",
4:"result/False_-1_False_False_cifar10_300_64_4_False_resnet18_168493056.0_155487_1000.0_10.0_0.0005.pt",
8:"result/False_-1_False_False_cifar10_300_64_8_False_resnet18_104005632.0_262394_1000.0_10.0_0.0005.pt",
16:"result/False_-1_False_False_cifar10_300_64_16_False_resnet18_71761920.0_488910_1000.0_10.0_0.0005.pt",
32:"result/False_-1_False_False_cifar10_300_64_32_False_resnet18_55640064.0_279824_1000.0_10.0_0.0005.pt",
64: "result/False_-1_False_False_cifar10_300_64_64_False_resnet18_47579136.0_13181_1000.0_10.0_0.0005.pt",
}
transform_test = transforms.Compose([
transforms.ToTensor(),
])
prefetch = 4
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
clean_loader = torch.utils.data.DataLoader(testset, batch_size=test_bs, shuffle=False, num_workers=prefetch,pin_memory=True)
dataframeStarted = None
dataframe = None
for l,model_name in models.items():
print("L={}".format(l))
checkpoint = torch.load(model_name)
net = checkpoint
net.output_relus = True
net.cuda()
#net = torch.nn.DataParallel(net, device_ids=[0])
cudnn.benchmark = True
net.eval()
print('Model Loaded')
# /////////////// Data Loader ///////////////
transform = torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
correct = 0
for batch_idx, (data, target) in enumerate(clean_loader):
all_data = list()
for _input in data:
all_data.append(transform(_input).view(1,*_input.size()))
all_data = torch.cat(all_data)
data = all_data.cuda()
output = net.temp_forward(data,l,-1,0)
pred = output.max(1)[1]
correct += pred.eq(target.cuda()).sum()
clean_error = 1 - correct.float() / len(clean_loader.dataset)
clean_error = clean_error.cpu().numpy()
print('Clean dataset error (%): {:.2f}'.format(100 * clean_error))
# /////////////// Further Setup ///////////////
def auc(errs): # area under the distortion-error curve
area = 0
for i in range(1, len(errs)):
area += (errs[i] + errs[i - 1]) / 2
area /= len(errs) - 1
return area
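# Worked example of the trapezoidal mean above, with hypothetical error rates:
# auc([0.10, 0.20, 0.30]) = ((0.20 + 0.10)/2 + (0.30 + 0.20)/2) / 2 = 0.20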
def show_performance(distortion_name):
with torch.no_grad():
errs = []
labels = np.load("data/labels.npy")
dataset = np.load("data/{}.npy".format(distortion_name))
dataset = np.transpose(dataset,[0,3,1,2])
for severity in range(0, 5):
torch_data = torch.FloatTensor(dataset[10000*severity:10000*(severity+1)])
torch_labels = torch.LongTensor(labels[10000*severity:10000*(severity+1)])
test = torch.utils.data.TensorDataset(torch_data, torch_labels)
distorted_dataset_loader = torch.utils.data.DataLoader(test, batch_size=test_bs, shuffle=False,num_workers=prefetch,pin_memory=True)
correct = 0
for batch_idx, (data, target) in enumerate(distorted_dataset_loader):
all_data = list()
for _input in data:
all_data.append(transform(_input/255).view(1,*_input.size()))
all_data = torch.cat(all_data)
data = all_data.cuda()
output = net.temp_forward(data,l,-1,0)
pred = output.max(1)[1]
correct += pred.eq(target.cuda()).sum()
percentage = correct.float() / 10000
errs.append( (1 - percentage ).item())
print('\n=Average', tuple(errs))
return errs
# /////////////// End Further Setup ///////////////
# /////////////// Display Results ///////////////
import collections
distortions = [
'gaussian_noise', 'shot_noise', 'impulse_noise',
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur',
'snow', 'frost', 'fog', 'brightness',
'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression',
]
error_rates = list()
result_dict = dict(l=l,model_name=model_name,clean_error=clean_error)
for distortion_name in distortions:
rate = show_performance(distortion_name)
result_dict[distortion_name] = np.mean(rate)
print('Distortion: {:15s} | Error (%): {:.2f}'.format(distortion_name, 100 * np.mean(rate)))
error_rates.append(np.mean(rate))
if not dataframeStarted:
dataframe = | pd.DataFrame(result_dict,index=[0]) | pandas.DataFrame |
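# Hedged sketch of the assumed continuation: collect one row of corruption error
# rates per model and write everything out at the end of the loop. The CSV name
# is an illustrative assumption.
def _collect_results(result_dicts, path='cifar10c_results.csv'):
    import pandas as pd
    df = pd.DataFrame(result_dicts)   # one row per evaluated model
    df.to_csv(path, index=False)
    return df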
import math
import os
from os.path import join as pjoin
import json
import copy
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import GPUtil
import pandas as pd
from multiprocessing import Pool
from tqdm import tqdm
import sklearn.metrics
from .config import print_config, class_labels
from .utils import (
anno_to_binary, cut_score, debug, display_imgs, info, gen_cwd_slash, labels_to_str, load_config, load_img,
np_macro_f1, str_to_labels, class_id_to_label, class_ids_to_label, combine_windows, chunk, compute_i_coords,
format_macro_f1_details, vec_to_str
)
# from .utils_heavy import predict, model_from_config
from .ignite_trainer import predict as predict
# def predict_and_save_scores(
# config,
# path_to_anno=None,
# path_to_imgs=None,
# save_scores_to=None,
# to_csv=None,
# ):
# model = model_from_config(config, which='latest')
# valid_anno = pd.read_csv(path_to_anno, index_col=0)
# predict(config)
# return valid_anno_predicted
def remove_scores_predicted(config):
cwd_slash = gen_cwd_slash(config)
pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0) \
.drop(columns='Scores Predicted') \
.to_csv(cwd_slash('validation_predictions.csv'))
def evaluate_validation_prediction(config):
info('evaluate_validation_prediction()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(config['path_to_valid_anno_cache'], index_col=0, dtype=object)
prediction_df = pd.read_csv(cwd_slash('valid_predicted.csv'), index_col=0, dtype=object)
anno = anno.join(prediction_df, how='left')
# DEBUG BEGIN
anno.loc[:, ['Target', 'Predicted', 'folder', 'extension']].to_csv(cwd_slash('valid_anno_predicted.csv'))
# DEBUG END
y_true, y_pred = anno_to_binary(anno, config)
macro_f1_score, f1_details = np_macro_f1(y_true, y_pred, config, return_details=True)
print(format_macro_f1_details(f1_details, config))
print(f'macro_f1_score = {macro_f1_score}')
def final_corrections(config):
info('final_corrections()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('test_predicted.csv'), index_col=0)
# correct best submission [TODO: REMOVE: not for private leaderboard] --------------
# best_anno = pd.read_csv(cwd_slash('submission_587.csv'), index_col=0)
# rare_classes = [15, 27, 10, 8, 9, 17, 20, 24, 26]
# comparison_anno = anno.copy()
# comparison_anno['best'] = best_anno['Predicted']
# plot_imgs(
# config,
# comparison_anno.query('best != Predicted').sample(28),
# save_as='./tmp/best_submission_corrections.png',
# folder='data/test_minimaps',
# extension='jpg',
# )
# new_rows = []
# for id_, row in comparison_anno.iterrows():
# current_labels = str_to_labels(row['Predicted'])
# best_labels = str_to_labels(row['best'])
# for c in rare_classes:
# if c in current_labels and c not in best_labels:
# debug(f"removing {c} from {id_}")
# current_labels.remove(c)
# if c not in current_labels and c in best_labels:
# debug(f"adding {c} to {id_}")
# current_labels.append(c)
# new_row = {
# 'Id': id_,
# 'Predicted': labels_to_str(current_labels),
# }
# new_rows.append(new_row)
# anno = pd.DataFrame.from_records(new_rows).set_index('Id')
# debug(f"anno ({len(anno)}) =\n{anno.head(10)}")
# correct leaked --------------
# pairs_anno = pd.read_csv('data/identical_pairs.csv')
# hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
# correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
# .join(anno, how='left', on=['test_id'])
# correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
# debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
# debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
# correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
# actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# # DEBUG BEGIN
# # plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# # DEBUG END
# debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
# debug(f"actual_corrections =\n{actual_corrections}")
# anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# correct leaked 2 --------------
pairs_anno = pd.read_csv('data/identical_pairs_new_fixed.csv')
for i_begin, i_end in chunk(len(pairs_anno), 24):
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('test_id', axis=1).set_index('hpa_id'),
save_as=f'./tmp/diff_{i_begin}_hpa.jpg',
folder='data/hpa_public_imgs',
extension='jpg',
background_color=None,
channel=None,
dpi=100,
)
plot_imgs(
config,
pairs_anno.iloc[i_begin:i_end].drop('hpa_id', axis=1).set_index('test_id'),
save_as=f'./tmp/diff_{i_begin}_test.jpg',
folder='data/test_full_size',
extension='tif',
background_color=None,
channel=['red', 'green', 'blue'],
dpi=100,
)
hpa_anno = pd.read_csv('data/hpa_public_imgs.csv', index_col=0)
correction_anno = pairs_anno.join(hpa_anno, how='left', on=['hpa_id'])\
.join(anno, how='left', on=['test_id'])
correction_anno['Target'] = [labels_to_str(str_to_labels(x)) for x in correction_anno['Target']]
debug(f"correction_anno['test_id'] = {correction_anno['test_id']}")
debug(f"len = {len(anno.loc[correction_anno['test_id'], 'Predicted'].values)}")
correction_anno['Predicted'] = anno.loc[correction_anno['test_id'], 'Predicted'].values
actual_corrections = correction_anno.query('Predicted != Target').set_index('test_id')
# DEBUG BEGIN
# plot_imgs(config, actual_corrections, folder='data/test_minimaps', extension='jpg')
# DEBUG END
debug(f"making {len(correction_anno)} corrections, {len(actual_corrections)} are actually different")
debug(f"actual_corrections =\n{actual_corrections}")
anno.loc[correction_anno['test_id'], 'Predicted'] = correction_anno['Target'].values
# DEBUG BEGIN
# plot_imgs(
# config,
# anno.loc[[27 in str_to_labels(p) for p in anno['Predicted']]],
# folder='data/test_minimaps',
# extension='jpg'
# )
# DEBUG END
anno.to_csv(cwd_slash('test_predicted_corrected.csv'))
# def list_confusion(config):
# fn_counts_list = {}
# class_labels = [f'{k}-{classes[k]}' for k in range(n_classes)]
# for which_class in tqdm(range(n_classes)):
# cwd_slash = gen_cwd_slash(config)
# anno = pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0)
# y_true, y_pred = anno_to_binary(anno)
# fn = y_true * (1 - y_pred)
# fp = (1 - y_true) * y_pred
# i_fn_predictions = np.nonzero(fn[:, which_class])[0]
# fn_counts = fp[i_fn_predictions, :].sum(axis=0) / len(i_fn_predictions)
# fn_counts_list[class_labels[which_class]] = fn_counts
# # out = pd.Series(fn_counts, index=pd.Index(range(n_classes), name='class'))\
# # .sort_values(ascending=False)\
# # .head(3)
# pd.DataFrame(fn_counts_list, index=class_labels).to_csv('./tmp/confusion.csv')
def plot_imgs(
config,
anno,
save_as='./tmp/imgs.jpg',
folder=None,
extension=None,
background_color=None,
channel=None,
dpi=100,
):
img_list = []
for id_, row in anno.iterrows():
img = load_img(
id_,
config,
resize=False,
folder=row.get('folder') or folder,
channel=channel,
extension=row.get('extension') or extension,
)
# if type(channel) is str:
# channel = {
# 'red': 0,
# 'green': 1,
# 'blue': 2,
# 'yellow': 3,
# }.get(channel)
# if channel is not None:
# img = img[:, :, channel]
debug(f' - Loaded image {id_} with size {img.shape}')
img_label = '\n'.join([f'{id_}'] + [f'{k} = {v}' for k, v in row.items()])
img_list.append((img, img_label))
display_imgs(
img_list,
save_as=save_as,
background_color=background_color,
dpi=dpi,
)
def plot_tfpn_examples(config, which_class, max_n_imgs=28, output_folder='./tmp'):
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('validation_predictions.csv'), index_col=0)
y_true, y_pred = anno_to_binary(anno)
y_true = y_true[:, which_class]
y_pred = y_pred[:, which_class]
def plot_imgs(selector, filename, background_color):
debug(f'selector = {selector}')
if type(config['score_threshold']) is list:
score_threshold = config['score_threshold'][which_class]
else:
score_threshold = config['score_threshold']
tp_idxs = np.nonzero(selector > score_threshold)[0]
if len(tp_idxs) > max_n_imgs:
sample_idxs = np.sort(np.random.choice(range(len(tp_idxs)), max_n_imgs, replace=False))
tp_idxs = tp_idxs[sample_idxs]
img_list = []
for idx in tp_idxs:
row = anno.iloc[idx]
img_id = row.name
labels_true = class_ids_to_label(str_to_labels(row['Target']), config)
labels_pred = class_ids_to_label(str_to_labels(row['Predicted']), config)
img_label = '\n'.join([
f'{img_id}',
f'T: {labels_true}',
f'P: {labels_pred}',
])
# img = load_img(img_id, self.config, resize=False, folder='./data/train_full_size', extension='tif')
img = load_img(
img_id,
config,
resize=False,
folder=config['path_to_valid'],
channel=None,
extension=config['img_extension'],
)
debug(f' - Loaded image {img_id} with size {img.shape}')
img_list.append((img, img_label))
display_imgs(
img_list,
save_as=filename,
background_color=background_color,
)
def out_slash(fn):
return pjoin(output_folder, fn)
plot_imgs(y_true * y_pred, out_slash(f'class_{which_class}_true_positives.png'), 'white')
plot_imgs((1 - y_true) * y_pred, out_slash(f'class_{which_class}_false_positives.png'), 'yellow')
plot_imgs(y_true * (1 - y_pred), out_slash(f'class_{which_class}_false_negatives.png'), 'blue')
# plot_imgs((1 - y_true) * (1 - y_pred), out_slash(f'class_{which_class}_true_negatives.png'), 'black')
def add_extra_data_into_train_anno(config):
cwd_slash = gen_cwd_slash(config)
train_anno = pd.read_csv(cwd_slash('train_windowed_anno.csv'), index_col=0)
valid_anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'), index_col=0)
train_with_hpa_anno = pd.read_csv('data/train_with_hpa.csv', index_col=0)
train_windowed_anno = pd.read_csv('data/train_windowed.csv', index_col=0)
hpa_ids = set(train_with_hpa_anno.index)
existing_ids = set(valid_anno['source_img_id']).union(train_anno['source_img_id'])
new_ids = hpa_ids.difference(existing_ids)
extra_train_anno = train_with_hpa_anno.loc[new_ids]
debug(f'extra_train_anno ({len(extra_train_anno)}) =\n{extra_train_anno.head(10)}')
extra_train_windowed_anno = train_windowed_anno.join(extra_train_anno, how='right', on=['source_img_id'])
debug(f'extra_train_windowed_anno ({len(extra_train_windowed_anno)}) =\n{extra_train_windowed_anno.head(10)}')
pd.concat([train_anno, extra_train_windowed_anno]).to_csv(cwd_slash('train_windowed_anno.csv'))
# def calibrate_one_task(task):
# i_class = task['i_class']
# mat_pred_windowed = task['mat_pred_windowed']
# mat_true = task['mat_true']
# alpha = task['alpha']
# i_windowss = task['i_windowss']
# beta_values = task['beta_values']
# config = task['config']
# details_list = []
# for beta in beta_values:
# vec_true = mat_true[:, i_class]
# vec_pred_windowed = mat_pred_windowed[:, i_class]
# list_pred = []
# for i_source, i_windows in enumerate(i_windowss):
# combined_prediction = vec_pred_windowed[i_windows].mean() + vec_pred_windowed[i_windows].mean()
# list_pred.append(combined_prediction)
# vec_pred = np.array(list_pred)
# f1 = np_macro_f1(vec_true, vec_pred, config)
# details_list.append({
# 'i_class': i_class,
# 'alpha': alpha,
# 'beta': beta,
# 'f1': f1,
# })
# # debug(f'i_class = {i_class}, alpha = {alpha}, beta = {beta}, f1 = {f1}, best_f1 = {best_f1}')
# details_df = pd.DataFrame.from_records(details_list)
# return {
# 'task': task,
# 'details_df': details_df,
# }
# def calibrate_windowed_score(
# config,
# n_threads=70,
# n_cols=7,
# save_graph_to='./tmp/calibrate_score_threshold.png',
# epsilon=1e-7,
# ):
# info('calibrate_windowed_score()')
# cwd_slash = gen_cwd_slash(config)
# alpha_values = range(10)
# beta_values = np.linspace(0, 1, 21)
# mat_pred_windowed = np.load(cwd_slash('valid_windowed_scores.npy'))
# valid_anno = pd.read_csv(config['path_to_valid_anno_cache'])
# mat_true = np.zeros((valid_anno.shape[0], 28))
# for i, target_str in enumerate(valid_anno['Target']):
# targets = str_to_labels(target_str)
# mat_true[np.ix_([i], targets)] = 1
# valid_windowed_anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'))
# valid_windowed_anno['row_number'] = valid_windowed_anno.index
# grouped = valid_windowed_anno.groupby('source_img_id')
# source_id_to_window_row_nums = {id_: group['row_number'].values.tolist() for id_, group in grouped}
# i_windowss = [source_id_to_window_row_nums[id_] for id_ in valid_anno['Id']]
# task_list = [
# {
# 'i_class': i_class,
# 'alpha': alpha,
# 'mat_pred_windowed': mat_pred_windowed,
# 'mat_true': mat_true,
# 'i_windowss': i_windowss,
# 'beta_values': beta_values,
# 'config': config,
# } for i_class in range(config['_n_classes']) for alpha in alpha_values
# ]
# details_dfs = []
# with Pool(n_threads) as p:
# result_iter = p.imap_unordered(calibrate_one_task, task_list)
# for i_result, result in enumerate(result_iter):
# info(
# f"({i_result}/{len(task_list)}) "
# f"i_class = {result['task']['i_class']}, "
# f"alpha = {result['task']['alpha']} is done"
# )
# details_dfs.append(result['details_df'])
# details_df = pd.concat(details_dfs)
# if save_graph_to is not None:
# n_rows = math.ceil(config['_n_classes'] / n_cols)
# plt.figure(figsize=(n_cols * 10, n_rows * 10))
# for i_class, group_df in details_df.groupby('i_class'):
# mat = group_df.pivot(index='beta', columns='alpha', values='f1')
# plt.subplot(n_rows, n_cols, i_class + 1, sharex=plt.gca(), sharey=plt.gca())
# plt.imshow(mat, aspect='auto')
# plt.xticks(range(len(alpha_values)), alpha_values)
# plt.yticks(range(len(beta_values)), beta_values)
# plt.text(0, 1, f'{i_class}', transform=plt.gca().transAxes)
# plt.savefig(save_graph_to, dpi=100)
# debug(f'Saved graph to {save_graph_to}')
# print(details_df)
# details_df.to_csv(cwd_slash('calibrate_windowed_score_details.csv'), index=False)
# debug(f"saved to {cwd_slash('calibrate_windowed_score_details.csv')}")
# best_df = pd.concat([group.sort_values('f1').tail(1) for i_class, group in details_df.groupby('i_class')])
# best_df['manually_modified'] = False
# best_df.to_csv(cwd_slash('calibrate_windowed_score.csv'), index=False)
# debug(f"saved to {cwd_slash('calibrate_windowed_score.csv')}")
# def calibrate_score_threshold(config, n_cols=7, save_graph_to='./tmp/calibrate_score_threshold.png', epsilon=1e-7):
# info('calibrate_score_threshold()')
# cwd_slash = gen_cwd_slash(config)
# n_rows = math.ceil(config['_n_classes'] / n_cols)
# mat_pred = np.load(cwd_slash('valid_scores.npy'))
# anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'))
# mat_true = np.zeros_like(mat_pred)
# for i, target_str in enumerate(anno['Target']):
# targets = str_to_labels(target_str)
# mat_true[np.ix_([i], targets)] = 1
# if save_graph_to is not None:
# plt.figure(figsize=(n_cols * 10, n_rows * 10))
# best_ths = []
# for class_id in tqdm(config['classes']):
# thresholds = np.round(np.linspace(0, 1, 1001), 3)
# f1_scores = np.zeros_like(thresholds)
# ps = []
# rs = []
# for i_th, th in enumerate(thresholds):
# y_pred = mat_pred[:, i_class]
# y_pred = np.where(y_pred < th, np.zeros_like(y_pred), np.ones_like(y_pred))
# y_true = mat_true[:, i_class]
# tp = np.sum(y_true * y_pred, axis=0)
# # tn = np.sum((1 - y_true) * (1 - y_pred), axis=0)
# fp = np.sum((1 - y_true) * y_pred, axis=0)
# fn = np.sum(y_true * (1 - y_pred), axis=0)
# p = tp / (tp + fp + epsilon)
# r = tp / (tp + fn + epsilon)
# ps.append(p)
# rs.append(r)
# out = 2 * p * r / (p + r + epsilon)
# # replace all NaN's with 0's
# out = np.where(np.isnan(out), np.zeros_like(out), out)
# f1_scores[i_th] = out
# if save_graph_to is not None:
# plt.subplot(n_rows, n_cols, i_class + 1, sharex=plt.gca(), sharey=plt.gca())
# plt.plot(thresholds, f1_scores)
# plt.plot(thresholds, ps)
# plt.plot(thresholds, rs)
# plt.text(0, 1, f'{i_class}', transform=plt.gca().transAxes)
# # debug(f'thresholds = {thresholds}')
# # debug(f'f1_scores = {f1_scores}')
# best_th = thresholds[np.argmax(f1_scores)]
# best_ths.append(best_th)
# if save_graph_to is not None:
# plt.savefig(save_graph_to, dpi=100)
# debug(f'Saved graph to {save_graph_to}')
# debug(f'best_ths = {best_ths}')
# with open(cwd_slash('calibrated_score_threshold.json'), 'w') as f:
# json.dump(best_ths, f)
def predict_for_valid(config):
cwd_slash = gen_cwd_slash(config)
valid_windowed_anno = pd.read_csv(config['path_to_valid_windowed_anno_cache'], index_col=0)
predict(
config,
valid_windowed_anno,
cwd_slash('model.pth'),
save_numpy_to='valid_windowed_predicted.npy',
save_csv_to='valid_windowed_anno_predicted.csv',
target_col='corrected_target',
)
# predict(
# anno=cwd_slash('valid_windowed_anno.csv'),
# config=config,
# extension=config['img_extension'],
# folder=config['path_to_valid'],
# to_npy=cwd_slash('valid_windowed_scores.npy'),
# )
# def cut_score_for_valid(config):
# info('cut_score_for_valid()')
# cwd_slash = gen_cwd_slash(config)
# path_to_score = cwd_slash('calibrate_windowed_score.csv')
# if os.path.exists(path_to_score):
# tb = pd.read_csv(path_to_score)
# debug(f"read from {path_to_score}")
# score_threshold = tb.sort_values('i_class')['beta'].values
# debug(f'score_threshold = {score_threshold}')
# min_n_windows = tb.sort_values('i_class')['alpha'].values
# debug(f'min_n_windows = {min_n_windows}')
# else:
# debug(f'WARNING: using default score_threshold and min_n_windows')
# score_threshold = config['score_threshold']
# min_n_windows = 3
# # if os.path.exists(cwd_slash('calibrated_score_threshold.json')):
# # with open(cwd_slash('calibrated_score_threshold.json'), 'r') as f:
# # score_threshold = json.load(f)
# # else:
# # score_threshold = config['score_threshold']
# debug('cut_score()')
# cut_score(
# anno=cwd_slash('valid_windowed_anno.csv'),
# scores_mat=cwd_slash('valid_windowed_scores.npy'),
# config=config,
# prediction_col='Predicted',
# score_threshold=score_threshold,
# to_csv=cwd_slash('valid_windowed_predicted.csv'),
# )
# debug('combine_windows()')
# combine_windows(
# cwd_slash('valid_windowed_predicted.csv'),
# min_n_windows,
# config,
# save_combined_anno_to=cwd_slash('valid_predicted.csv'),
# group_col='source_img_id',
# )
def predict_for_test(config):
info('predict_for_test()')
cwd_slash = gen_cwd_slash(config)
test_windowed_anno = pd.read_csv(config['path_to_test_anno'], index_col=0)
test_windowed_anno = compute_i_coords(test_windowed_anno, config)
test_windowed_anno['group'] = 'test_full_size'
predict(
config,
test_windowed_anno,
cwd_slash('model.pth'),
save_numpy_to='test_windowed_predicted.npy',
save_csv_to='test_windowed_anno_predicted.csv',
)
# anno = pd.read_csv('./data/test_windowed.csv', index_col=0)
# if config['submission_subsampling'] is not None:
# anno = anno.sample(config['submission_subsampling'])
# predict(
# anno=anno,
# config=config,
# extension=config['img_extension'],
# folder=config['path_to_test'],
# to_npy=cwd_slash('test_windowed_scores.npy'),
# )
def create_csv_for_debugger(config):
info('create_csv_for_debugger()')
cwd_slash = gen_cwd_slash(config)
anno = pd.read_csv(cwd_slash('valid_windowed_anno.csv'), index_col=0)
pred_mat = np.load(cwd_slash('valid_windowed_scores.npy'))
pred_anno = | pd.DataFrame(pred_mat, columns=[f'score_of_{x}' for x in config['class_ids']], index=anno.index) | pandas.DataFrame |
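# Hedged sketch of the assumed continuation: join the per-class score columns
# back onto the windowed annotation table so it can be inspected as a CSV.
# The helper name and output filename are illustrative assumptions.
def _scores_to_debug_csv(anno, pred_mat, class_ids, path='valid_windowed_debug.csv'):
    import pandas as pd
    score_cols = [f'score_of_{c}' for c in class_ids]
    scores = pd.DataFrame(pred_mat, columns=score_cols, index=anno.index)
    anno.join(scores).to_csv(path)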
import os
import json
import pandas as pd
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import math
import configparser
import logging
import pickle
config = configparser.ConfigParser()
config.read('../config.ini')
logger = logging.getLogger(__name__)
class RealEstateData:
"""
RealEstateData is designed to collect real estate listings for analysis from a given CITY, STATE,
parsing data from the RapidAPI for Realtor.com.
Use Guidelines:
my_real_estate = RealEstateData('CITY', 'STATE_CODE', 'RAPIDAPI_SALE')
my_real_estate_results = my_real_estate.get_results()
To Do:
- Check for null values in API return
- Check for invalid input
"""
def __init__(self, city, state, api):
self.city = city.upper()
self.state = state.upper()
self.api = api
self._url = config.get(api, 'rapidapi_url')
self._jsonREData = self._fetch_housing_data()
self._results = self._parse()
self._requests_remaining = 99999
def __repr__(self):
return f"RealEstateData('{self.city, self.state, self.api}')"
def __str__(self):
return f'{self.city, self.state, self.api} real estate data'
def get_results(self):
return self._results
def _fetch_housing_data(self):
"""
Function to fetch all housing data from Realtor.com via RapidAPI
:return: List of JSON dictionaries containing all the results from the API call
"""
list_json_data = None
list_missed_states = []
list_missed_cities = []
list_missed_offsets = []
list_collected_data = []
response = self.api_call()
if self.validate_api_call(response):
json_content = json.loads(response.content)
list_json_data = [json_content]
housing_total = self.get_housing_total(json_content=json_content)
list_offsets = self.define_chunks(total=housing_total)
for offset in list_offsets:
response = self.api_call(offset=offset)
if self.validate_api_call(response):
json_content = json.loads(response.content)
list_json_data.append(json_content)
else:  # Try again; the error is usually 500: Error JSON parsing
response = self.api_call(offset=offset)
if self.validate_api_call(response):
json_content = json.loads(response.content)
list_json_data.append(json_content)
else:
logger.error(f'{self.state}-{self.city} failed on offset: {offset}')
list_missed_states.append(self.state)
list_missed_cities.append(self.city)
list_missed_offsets.append(offset)
list_collected_data.append(-1)
dict_missed_data = {'state': list_missed_states, 'city': list_missed_cities,
'offset': list_missed_offsets, 'collected': list_collected_data}
if os.path.exists('../../data/models/missed_data.pickle'):
with open('../../data/models/missed_data.pickle', 'rb') as file:
df = pickle.load(file)
df = pd.concat([df, pd.DataFrame(dict_missed_data)], ignore_index=True)
else:
df = pd.DataFrame(dict_missed_data)
with open('../../data/models/missed_data.pickle', 'wb') as file:
pickle.dump(df, file)
return list_json_data
def api_call(self, offset=0):
"""
Function to conduct an API call and return the response
:param offset:
:return:
"""
querystring = {"city": self.city,
"offset": offset,
"state_code": self.state,
"limit": "200",
"sort": config.get(self.api, 'rapidapi_sort_method')}
headers = {
'x-rapidapi-key': config.get(self.api, 'rapidapi_key'),
'x-rapidapi-host': config.get(self.api, 'rapidapi_host')
}
s = requests.Session()
retries = Retry(total=5, backoff_factor=1, status_forcelist=[502, 503, 504])
s.mount('https://', HTTPAdapter(max_retries=retries))
response = s.get(self._url, headers=headers, params=querystring)
config.set(self.api, 'rapidapi_api_call_limit', response.headers['X-RateLimit-Requests-Remaining'])
with open('../config.ini', 'w') as configfile:
config.write(configfile)
self._requests_remaining = int(response.headers['X-RateLimit-Requests-Remaining'])
return response
def validate_api_call(self, response):
"""
Checks that the 'status' code in the JSON content is 200, indicating that RapidAPI returned data
:param response: RapidAPI response object
:return: True if response contained data, False otherwise
"""
if self.api == 'RAPIDAPI_SALE':
json_content = json.loads(response.content)
# Check for 200 response from RapidAPI server
if json_content['status'] == 200 and json_content['data']['total'] is not None:
return True
if self.api == 'RAPIDAPI_SOLD':
json_content = json.loads(response.content)
# Check for content
if json_content['returned_rows'] > 0:
return True
return False
def get_housing_total(self, json_content):
"""
:param json_content:
:return:
"""
data = config.get(self.api, 'rapidapi_json_lvl1')
total = config.get(self.api, 'rapidapi_json_lvl2')
return json_content[data][total]
def define_chunks(self, total):
"""
Function to define the offsets needed to collect the total number of listings in CITY, STATE from Realtor.com via RapidAPI
:param total: Total number of listings
:return: list of offsets needed to collect entire dataset
"""
chunk = int(config.get(self.api, 'rapidapi_offset'))
list_chunk_sizes = []
for x in range(1, math.ceil(total / chunk)):
list_chunk_sizes.append(chunk * x)
return list_chunk_sizes
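# Worked example (assuming the configured 'rapidapi_offset' step is 200):
# define_chunks(total=450) -> [200, 400]; offset 0 is fetched before this call.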
def _parse(self):
"""
Function to format the entire dataset as a DataFrame
:return: DataFrame built from total dataset
"""
df_results = None
list_results_dfs = None
if self._jsonREData is not None:
if self.api == 'RAPIDAPI_SALE':
list_results_dfs = [ | pd.json_normalize(result['data']['results']) | pandas.json_normalize |
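# Hedged sketch of the assumed continuation: flatten each page of listing JSON
# into a DataFrame and concatenate the pages into one results table. The helper
# name is hypothetical; only the 'data'/'results' keys come from the code above.
def _pages_to_dataframe(json_pages):
    import pandas as pd
    frames = [pd.json_normalize(page['data']['results']) for page in json_pages]
    return pd.concat(frames, ignore_index=True) if frames else None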
import pandas as pd
import numpy as np
import os
import json
from datetime import datetime
import glob
from pathlib import Path
#############Load config.json and get input and output paths
from fullprocess import find_csv_files
with open("config.json", "r") as f:
config = json.load(f)
input_folder_path = config["input_folder_path"]
output_folder_path = config["output_folder_path"]
def write_files(final_df, ingested_files):
if not os.path.exists(output_folder_path):
os.makedirs(
output_folder_path
) # If ingest data directory does not exist create it
filename = os.path.join(output_folder_path, "finaldata.csv")
final_df.to_csv(filename, index=False)
with open(os.path.join(output_folder_path, "ingestedfiles.txt"), "w") as output:
for file in ingested_files:
output.write(f"{file}\n")
def merge_multiple_dataframe():
ingested_files = find_csv_files(input_folder_path)
final_df = pd.DataFrame()
for filename in ingested_files:
temp_df = pd.read_csv(os.path.join(input_folder_path,filename))
final_df = | pd.concat([final_df, temp_df]) | pandas.concat |
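# Hedged sketch of the assumed continuation: drop exact duplicate rows (an
# assumption about the intended behaviour) and hand off to write_files above.
def finalize_ingestion(final_df, ingested_files):
    deduped = final_df.drop_duplicates().reset_index(drop=True)
    write_files(deduped, ingested_files)
    return deduped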
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 29 08:35:09 2019
@author: user
"""
# build first input with noational aggregation
# import build_input_national_aggr
print('####################')
print('BUILDING INPUT DATA FOR DISAGGREGATION OF SWITZERLAND INTO ARCHETYPES')
print('####################')
import os
import itertools
import hashlib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import grimsel.auxiliary.sqlutils.aux_sql_func as aql
import datetime
import seaborn as sns
from grimsel.auxiliary.aux_general import print_full
from grimsel.auxiliary.aux_general import translate_id
import config_local as conf
from grimsel.auxiliary.aux_general import expand_rows
base_dir = conf.BASE_DIR
data_path = conf.PATH_CSV
data_path_prv = conf.PATH_CSV + '_national_aggr'
seed = 2
np.random.seed(seed)
db = conf.DATABASE
sc = conf.SCHEMA
#db = 'grimsel_1'
#sc = 'lp_input_ee_dsm'
def append_new_rows(df, tb):
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
def del_new_rows(ind, tb, df):
del_list = df[ind].drop_duplicates()
for i in ind:
del_list[i] = '%s = '%i + del_list[i].astype(str)
del_str = ' OR '.join(del_list.apply(lambda x: '(' + ' AND '.join(x) + ')', axis=1))
exec_strg = '''
DELETE FROM {sc}.{tb}
WHERE {del_str}
'''.format(tb=tb, sc=sc, del_str=del_str)
aql.exec_sql(exec_strg, db=db)
#def replace_table(df, tb):
#
## list_col = list(aql.get_sql_cols(tb, sc, db).keys())
#
# aql.write_sql(df, db=db, sc=sc, tb=tb, if_exists='replace')
def append_new_cols(df, tb):
#
list_col = list(aql.get_sql_cols(tb, sc, db).keys())
col_new = dict.fromkeys((set(df.columns.tolist()) - set(list_col)))
for key, value in col_new.items():
col_new[key] = 'DOUBLE PRECISION'
# col_new = dict.fromkeys((set(list_col[0].columns.tolist()) - set(list_col)),1)
aql.add_column(df_src=df,tb_tgt=[sc,tb],col_new=col_new,on_cols=list_col, db=db)
# exec_strg = '''
# AlTER
# DELETE FROM {sc}.{tb}
# WHERE {del_str}
# '''.format(tb=tb, sc=sc, del_str=del_str)
# aql.exec_sql(exec_strg, db=db)
#
# aql.write_sql(df[list_col], db=db, sc=sc, tb=tb, if_exists='append')
#
#aql.exec_sql('''
# ALTER TABLE lp_input_archetypes.profdmnd
# DROP CONSTRAINT profdmnd_pkey,
# DROP CONSTRAINT profdmnd_dmnd_pf_id_fkey;
# ''', db=db)
#%%
dfprop_era_arch = pd.read_csv(base_dir+'/archetype_disaggr/PV/prop_era_arch.csv', sep = ';')
#dfpv_arch = pd.read_csv(os.path.join(base_dir,'PV/surf_prod_arch_pv.csv'),sep=';')
#dfpv_arch = pd.read_csv(os.path.join(base_dir,'PV/surf_prod_arch_pv_prop_0.csv'),sep=';')
dfpv_arch = pd.read_csv(base_dir+'/archetype_disaggr/PV/surf_prod_arch_pv_prop_new.csv',sep=';')
# set nd_id to that potential
#dfpv_arch['pv_power_pot'] = dfpv_arch['el_prod']/(1000*dfkev['flh'].mean())
dfpv_arch = dfpv_arch.groupby(dfpv_arch.nd_id_new).sum()
#dfpv_arch['nd_id_new'] = dfpv_arch.nd_id
#dfpv_arch.loc[:,dfpv_arch.nd_id_new.str.contains('OTH')] == 'OTH_TOT'
#dfpv_arch['cap_pv'] = 1666*(dfpv_arch['pv_power_pot']/dfpv_arch['pv_power_pot'].sum()) # 1666 MW SFOE 2016
dfpv_arch['cap_pv'] = 1666*(dfpv_arch['pv_power_tot_est']/dfpv_arch['pv_power_tot_est'].sum()) # 1666 MW SFOE 2016
dfpv_arch['cap_st_pwr'] = 0
#
#dfpv_arch_CH0 = dfpv_arch.loc['CH0']
#dfpv_arch = dfpv_arch.drop(['CH0'], axis = 0)
dfpv_arch = dfpv_arch.reset_index()
# %%
dfload_arch = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id not in %s'%(['CH0'])).reset_index(drop=True)
dfload_arch['DateTime'] = dfload_arch['DateTime'].astype('datetime64[ns]')
dfload_arch_res = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id.str.contains("SFH") or nd_id.str.contains("MFH")',engine='python').reset_index(drop=True)
dfload_arch_res['DateTime'] = dfload_arch_res['DateTime'].astype('datetime64[ns]')
dfload_arch_notres = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id.str.contains("OCO") or nd_id.str.contains("IND")',engine='python').reset_index(drop=True)
dfload_arch_notres['DateTime'] = dfload_arch_notres['DateTime'].astype('datetime64[ns]')
dfload_arch_CH0 = pd.read_csv(base_dir+'/archetype_disaggr/demand/dmnd_archetypes_0.csv').query(
'nd_id in %s'%(['CH0'])).reset_index(drop=True)
dfload_arch_CH0['DateTime'] = dfload_arch_CH0['DateTime'].astype('datetime64[ns]')
# dfload_arch = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['CH0'],'!=')])
# dfload_arch_res= aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['SFH%','MFH%'],'LIKE')])
# dfload_arch_notres= aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['OCO%','IND%'],'LIKE')])
# dfload_arch_CH0_1 = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes_0',filt=[('nd_id', ['CH0'])])
#dfload_arch = aql.read_sql('grimsel_1', 'profiles_raw','dmnd_archetypes')
dfload_dict ={}
dfload_dict_new = {}
df = dfload_arch_res.copy()
df['nd_id_new'] = 0
df['erg_tot_new'] = 0
for i in df.nd_id.unique():
dfload_dict[i] = df.loc[df.nd_id == i]
for l in (0,1,2,3):
df_1 = dfload_dict[i].copy()
df_1['erg_tot_new'] = df_1.loc[:,'erg_tot'] * dfprop_era_arch.loc[dfprop_era_arch.nd_el.str.contains(i+'_'+str(l)),'prop'].reset_index(drop=True).loc[0]
df_1['nd_id_new'] = i+'_'+str(l)
dfload_dict_new[i+'_'+str(l)] = df_1
dfload_arch_res_new = dfload_arch_notres.head(0)
for j in dfload_dict_new:
dfload_arch_res_new = dfload_arch_res_new.append(dfload_dict_new[j],ignore_index=True)
dfload_arch_notres['nd_id_new'] = dfload_arch_notres[['nd_id']]
dfload_arch_notres['erg_tot_new'] = dfload_arch_notres[['erg_tot']]
dfload_arch = dfload_arch_res_new.append(dfload_arch_notres,ignore_index=True)
dfload_arch = dfload_arch.set_index('DateTime')
dfload_arch.index = pd.to_datetime(dfload_arch.index)
dfload_arch_CH0 = dfload_arch_CH0.set_index('DateTime')
dfload_arch = dfload_arch.drop(columns=['nd_id','erg_tot']).rename(columns={'nd_id_new':'nd_id','erg_tot_new':'erg_tot'})
# %%
np.random.seed(3)
dferg_arch = dfload_arch.groupby('nd_id')['erg_tot'].sum()
dferg_arch = dferg_arch.reset_index()
dferg_arch['nd_id_new'] = dferg_arch.nd_id
dict_nd = dferg_arch.set_index('nd_id')['nd_id_new'].to_dict()
# %%
df_solar_canton_raw = pd.read_csv(base_dir+'/archetype_disaggr/PV/swiss_location_solar.csv')[['value', 'hy', 'canton','DateTime']]
df_solar_canton_raw['DateTime'] = df_solar_canton_raw['DateTime'].astype('datetime64[ns]')
# df_solar_canton_raw_test = aql.read_sql(db, 'profiles_raw', 'swiss_location_solar',
# keep=['value', 'hy', 'canton','DateTime'])
df_solar_canton_raw_1 = df_solar_canton_raw.pivot_table(index='DateTime',columns='canton', values='value')
df_solar_canton_1h = df_solar_canton_raw_1.resample('1h').sum()/4
df_solar_canton_1h['avg_all'] = df_solar_canton_1h.mean(axis=1)
df_solar_canton_1h['DateTime'] = df_solar_canton_1h.index
df_solar_canton_1h = df_solar_canton_1h.reset_index(drop=True)
df_solar_canton_1h['hy'] = df_solar_canton_1h.index
df_solar_canton_raw_1h = pd.melt(df_solar_canton_1h, id_vars=['DateTime','hy'], var_name='canton', value_name='value')
df_solar_canton_1h.index = df_solar_canton_1h['DateTime']
df_solar_canton_1h = df_solar_canton_1h.drop(columns=['DateTime','hy'])
cols = df_solar_canton_1h.columns.tolist()
cols = cols[-1:] + cols[:-1]
df_solar_canton_1h = df_solar_canton_1h[cols]
#list_ct = df_solar_canton_raw.canton.unique().tolist()
list_ct = df_solar_canton_1h.columns.tolist()
# %% ~~~~~~~~~~~~~~~~~~ DEF_NODE
#
#df_def_node_0 = aql.read_sql(db, sc, 'def_node', filt=[('nd', ['SFH%'], ' NOT LIKE ')])
#df_nd_add = pd.DataFrame(pd.concat([dferg_filt.nd_id_new.rename('nd'),
# ], axis=0)).reset_index(drop=True)
color_nd = {'IND_RUR': '#472503',
'IND_SUB': '#041FA3',
'IND_URB': '#484A4B',
'MFH_RUR_0': '#924C04',
'MFH_SUB_0': '#0A81EE',
'MFH_URB_0': '#BDC3C5',
'MFH_RUR_1': '#924C04',
'MFH_SUB_1': '#0A81EE',
'MFH_URB_1': '#BDC3C5',
'MFH_RUR_2': '#924C04',
'MFH_SUB_2': '#0A81EE',
'MFH_URB_2': '#BDC3C5',
'MFH_RUR_3': '#924C04',
'MFH_SUB_3': '#0A81EE',
'MFH_URB_3': '#BDC3C5',
'OCO_RUR': '#6D3904',
'OCO_SUB': '#0A31EE',
'OCO_URB': '#818789',
'SFH_RUR_0': '#BD6104',
'SFH_SUB_0': '#0EBADF',
'SFH_URB_0': '#A9A4D8',
'SFH_RUR_1': '#BD6104',
'SFH_SUB_1': '#0EBADF',
'SFH_URB_1': '#A9A4D8',
'SFH_RUR_2': '#BD6104',
'SFH_SUB_2': '#0EBADF',
'SFH_URB_2': '#A9A4D8',
'SFH_RUR_3': '#BD6104',
'SFH_SUB_3': '#0EBADF',
'SFH_URB_3': '#A9A4D8',
}
col_nd_df = pd.DataFrame.from_dict(color_nd, orient='index').reset_index().rename(columns={'index': 'nd',0:'color'})
df_def_node_0 = pd.read_csv(data_path_prv + '/def_node.csv')
# df_def_node_0 = aql.read_sql(db, sc, 'def_node')
df_nd_add = pd.DataFrame(pd.concat([dferg_arch.nd_id_new.rename('nd'),
], axis=0)).reset_index(drop=True)
# reduce numbar
#df_nd_add = df_nd_add
nd_id_max = df_def_node_0.loc[~df_def_node_0.nd.isin(df_nd_add.nd)].nd_id.max()
df_nd_add['nd_id'] = np.arange(0, len(df_nd_add)) + nd_id_max + 1
#df_nd_add['color'] = 'g'
df_nd_add = pd.merge(df_nd_add,col_nd_df, on = 'nd')
df_def_node = df_nd_add.reindex(columns=df_def_node_0.columns.tolist()).fillna(0)
dict_nd_id = df_nd_add.set_index('nd')['nd_id'].to_dict()
dict_nd_id = {nd_old: dict_nd_id[nd] for nd_old, nd in dict_nd.items()
if nd in dict_nd_id}
# %% set nd_id number to the corresponding nd_id new
dfpv_arch = dfpv_arch.set_index(dfpv_arch['nd_id_new'])
for key, value in dict_nd_id.items():
dfpv_arch.loc[key,'nd_id'] = value
dferg_arch = dferg_arch.set_index(dfpv_arch['nd_id_new'])
for key, value in dict_nd_id.items():
dferg_arch.loc[key,'nd_id'] = value
# %% ~~~~~~~~~~~~~~~~~~~~~~~ DEF_PP_TYPE
df_def_pp_type_0 = pd.read_csv(data_path_prv + '/def_pp_type.csv')
# df_def_pp_type_0 = aql.read_sql(db, sc, 'def_pp_type')
df_def_pp_type = df_def_pp_type_0.copy().head(0)
for npt, pt, cat, color in ((0, 'STO_LI_SFH', 'NEW_STORAGE_LI_SFH', '#7B09CC'),
(1, 'STO_LI_MFH', 'NEW_STORAGE_LI_MFH', '#59F909'),
(2, 'STO_LI_OCO', 'NEW_STORAGE_LI_OCO', '#28A503'),
(3, 'STO_LI_IND', 'NEW_STORAGE_LI_IND', '#1A6703'),
(4, 'PHO_SFH', 'PHOTO_SFH', '#D9F209'),
(5, 'PHO_MFH', 'PHOTO_MFH', '#F2D109'),
(6, 'PHO_OCO', 'PHOTO_OCO', '#F27E09'),
(7, 'PHO_IND', 'PHOTO_IND', '#F22C09'),):
df_def_pp_type.loc[npt] = (npt, pt, cat, color)
df_def_pp_type['pt_id'] = np.arange(0, len(df_def_pp_type)) + df_def_pp_type_0.pt_id.max() + 1
# %% ~~~~~~~~~~~~~~~~~~~~~~ DEF_FUEL
# all there
df_def_fuel = pd.read_csv(data_path_prv + '/def_fuel.csv')
# df_def_fuel_test = aql.read_sql(db, sc, 'def_fuel')
# %% ~~~~~~~~~~~~~~~~~~~~~~~ DEF_PLANT
df_def_plant_0 = pd.read_csv(data_path_prv + '/def_plant.csv')
# df_def_plant_test = aql.read_sql(db, sc, 'def_plant')
dict_pp_id_all = df_def_plant_0.set_index('pp')['pp_id'].to_dict()
df_pp_add_0 = pd.DataFrame(df_nd_add.nd).rename(columns={'nd': 'nd_id'})
df_pp_add_1 = df_pp_add_0.nd_id.str.slice(stop=3)
df_pp_add = pd.DataFrame()
for sfx, fl_id, pt_id, set_1 in [('_PHO', 'photovoltaics', 'PHO_', ['set_def_pr','set_def_add']),
('_STO_LI', 'new_storage', 'STO_LI_', ['set_def_st','set_def_add']),
]:
new_pp_id = df_def_plant_0.pp_id.max() + 1
data = dict(pp=df_pp_add_0 + sfx,
fl_id=fl_id, pt_id=pt_id + df_pp_add_1 , pp_id=np.arange(new_pp_id, new_pp_id + len(df_pp_add_0)),
**{st: 1 if st in set_1 else 0 for st in [c for c in df_def_plant_0.columns if 'set' in c]})
df_pp_add = df_pp_add.append(df_pp_add_0.assign(**data), sort=True)
df_pp_add.pp_id = np.arange(0, len(df_pp_add)) + df_pp_add.pp_id.min()
df_def_plant = df_pp_add[df_def_plant_0.columns].reset_index(drop=True)
for df, idx in [(df_def_fuel, 'fl'), (df_def_pp_type, 'pt'), (df_def_node, 'nd')]:
df_def_plant, _ = translate_id(df_def_plant, df, idx)
# selecting random profiles from canton list
#np.random.seed(4)
dict_pp_id = df_pp_add.set_index('pp')['pp_id'].to_dict()
df_pp_add_pho = df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics']
dict_pp_id_pho = df_pp_add_pho.set_index('pp')['pp_id'].to_dict()
# solar profile dictionary by node
dict_ct = {pp: list_ct[npp%len(list_ct)]
for npp, pp in enumerate(df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics',
'nd_id'].tolist())}
dict_ct = {pp: list_ct[0]
for npp, pp in enumerate(df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics',
'nd_id'].tolist())}
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEF_PROFILE
df_def_profile_0 = pd.read_csv(data_path_prv + '/def_profile.csv')
# df_def_profile_test = aql.read_sql(db, sc, 'def_profile')
df_def_profile_sup = pd.DataFrame({'primary_nd': df_solar_canton_1h.columns}) + '_PHO'
df_def_profile_sup['pf'] = 'supply_' + df_def_profile_sup.primary_nd
df_def_profile_sup['pf_id'] = df_def_profile_sup.index.rename('pf_id') + df_def_profile_0.pf_id.max() + 1
df_def_profile_sup = df_def_profile_sup[df_def_profile_0.columns]
df_def_profile_sup.drop(df_def_profile_sup.tail(23).index,inplace=True) # to keep only average for now
# Demand profiles
df_def_profile_dmnd = df_def_node.nd.copy().rename('primary_nd').reset_index()
df_def_profile_dmnd['pf'] = 'demand_EL_' + df_def_profile_dmnd.primary_nd
df_def_profile_dmnd['pf_id'] = df_def_profile_dmnd.index.rename('pf_id') + df_def_profile_sup.pf_id.max() + 1
df_def_profile_dmnd = df_def_profile_dmnd[df_def_profile_0.columns]
df_def_profile = pd.concat([df_def_profile_sup, df_def_profile_dmnd], axis=0)
# df_def_profile_prc], axis=0)
df_def_profile = df_def_profile.reset_index(drop=True)
#df_def_profile = pd.concat([df_def_profile_sup, df_def_profile_dmnd], axis=0)
df_def_profile
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NODE_ENCAR
df_node_encar_0 = pd.read_csv(data_path_prv + '/node_encar.csv')
# df_node_encar_0 = aql.read_sql(db, sc, 'node_encar')
df_node_encar_0_CH0 = df_node_encar_0.copy().loc[(df_node_encar_0.nd_id == 1)]
factor_CH0_dmnd = dfload_arch_CH0.erg_tot.sum()/df_node_encar_0.loc[(df_node_encar_0.nd_id == 1)].dmnd_sum
factor_CH0_dmnd = factor_CH0_dmnd.reset_index(drop=True)
df = df_node_encar_0_CH0.filter(like='dmnd_sum')*factor_CH0_dmnd.loc[0]
df_node_encar_0_CH0.update(df)
#exec_str = '''UPDATE sc.node_encar SET
# SET sc.dmnd_sum = df_node_encar_0_CH0.dmnd_sum
# WHERE nd_id = 1
#
# '''
#aql.exec_sql(exec_str=exec_str,db=db)
#df_ndca_add = (dferg_filt.loc[dferg_filt.nd_id_new.isin(df_nd_add.nd), ['nd_id_new', 'erg_tot_filled']]
# .rename(columns={'erg_tot_filled': 'dmnd_sum', 'nd_id_new': 'nd_id'}))
df_ndca_add = (dferg_arch.loc[dferg_arch.nd_id_new.isin(df_nd_add.nd), ['nd_id_new', 'erg_tot']]
.rename(columns={'erg_tot': 'dmnd_sum', 'nd_id_new': 'nd_id'}))
#TODO maybe add here some grid losses
data = dict(vc_dmnd_flex=0.1, ca_id=0, grid_losses=0.0413336227316051, grid_losses_absolute=0)
df_node_encar = df_ndca_add.assign(**data).reindex(columns=df_node_encar_0.columns)
list_dmnd = [c for c in df_node_encar if 'dmnd_sum' in c]
df_node_encar = df_node_encar.assign(**{c: df_node_encar.dmnd_sum
for c in list_dmnd})
df_node_encar = pd.merge(df_node_encar, df_def_profile_dmnd, left_on='nd_id', right_on='primary_nd', how='inner')
df_node_encar['dmnd_pf_id'] = df_node_encar.pf
df_node_encar = df_node_encar.loc[:, df_node_encar_0.columns]
for df, idx in [(df_def_node, 'nd'), (df_def_profile, ['pf', 'dmnd_pf'])]:
df_node_encar, _ = translate_id(df_node_encar, df, idx)
fct_dmnd = pd.read_csv(base_dir+'/archetype_disaggr/demand/factor_dmnd_future_years.csv',sep=';')
df = df_node_encar.filter(like='dmnd_sum')*fct_dmnd
df_node_encar.update(df)
df_0 = df_node_encar_0[df_node_encar_0.nd_id !=1]
# TODO REPLACE INSTEAD OF UPDATE
df_node_encar_new = pd.concat([df_0,df_node_encar_0_CH0,df_node_encar])
# set the absolute losses
df_node_encar_new.loc[df_node_encar_new.nd_id ==1,['grid_losses_absolute']] = 142320
df_node_encar_new = df_node_encar_new.reset_index(drop=True)
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFDMND
df_profdmnd_0 = pd.read_csv(data_path_prv + '/profdmnd.csv').query('dmnd_pf_id not in %s'%([0]))
# df_profdmnd_test = aql.read_sql(db, sc, 'profdmnd', filt=[('dmnd_pf_id', [0], '!=')])
#df_profdmnd_0 = aql.read_sql(db, sc, 'profdmnd', filt=[('hy', [0])], limit=1)
df_dmnd_add = dfload_arch
df_dmnd_add_CH0 = dfload_arch_CH0
#
#df_dmnd_add = dfload_arch.loc[dfload_arch.nd_id.isin([{val: key for key, val in dict_nd_id.items()}[nd] for nd in df_nd_add.nd_id])]
#
#df_dmnd_add = dfcr_filt.loc[dfcr_filt.nd_id.isin([{val: key for key, val in dict_nd_id.items()}[nd] for nd in df_nd_add.nd_id])]
df_dmnd_add['nd_id'] = df_dmnd_add.nd_id.replace(dict_nd)
df_dmnd_add['ca_id'] = 0
df_dmnd_add = pd.merge(df_dmnd_add, df_def_profile[['pf_id', 'primary_nd']], left_on='nd_id', right_on='primary_nd')
df_dmnd_add = df_dmnd_add.rename(columns={'erg_tot': 'value', 'pf_id': 'dmnd_pf_id'})
df_dmnd_add_CH0['ca_id'] = 0
df_dmnd_add_CH0['pf_id'] = 0
df_dmnd_add_CH0['primary_nd'] = 'CH0'
df_dmnd_add_CH0 = df_dmnd_add_CH0.rename(columns={'erg_tot': 'value', 'pf_id': 'dmnd_pf_id'})
#df_dmnd_add['value'] = df_dmnd_add.value / 1e3
df_profdmnd = df_dmnd_add[df_profdmnd_0.columns.tolist()].reset_index(drop=True)
df_profdmnd_CH0 = df_dmnd_add_CH0[df_profdmnd_0.columns.tolist()].reset_index(drop=True)
# TODO REPLACE INSTEAD OF UPDATE
df_profdmnd_new = pd.concat([df_profdmnd_CH0,df_profdmnd_0,df_profdmnd])
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFPRICE
# --> NO CHANGES! HOUSEHOLDS USE CH0 PRICE PROFILES
# %% ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PROFSUPPLY
#
df_profsupply = pd.read_csv(data_path_prv + '/profsupply.csv').head()
# df_profsupply = aql.read_sql(db, sc, 'profsupply', filt=[('hy', [0])], limit=1)
df_sup_add = df_pp_add.loc[df_pp_add.fl_id == 'photovoltaics', ['pp_id', 'nd_id']]
df_sup_add['canton'] = df_sup_add.nd_id.replace(dict_ct)
df_sup_add = df_sup_add[['canton']].drop_duplicates()
df_sup_add_new = pd.merge(df_sup_add, df_solar_canton_raw_1h, on='canton', how='inner')
dict_pf_id = df_def_profile_sup.set_index('pf')['pf_id'].to_dict()
#dict_pf_id = {ct: dict_pf_id['supply_' + ct + '_PHO'] for ct in list_ct}
dict_pf_id = {'avg_all': dict_pf_id['supply_' + 'avg_all' + '_PHO']}
df_sup_add_new['supply_pf_id'] = df_sup_add_new.canton.replace(dict_pf_id)
df_profsupply = df_sup_add_new[df_profsupply.columns.tolist()]
# %% ~~~~~~~~~~~~~~~~~~~~~~~ PLANT_ENCAR (needs profsupply data)
df_plant_encar = pd.read_csv(data_path_prv + '/plant_encar.csv')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 16:41:37 2018
@author: krzysztof
This module contains utilities useful when performing data analysis and drug sensitivity prediction with
Genomics of Drug Sensitivity in Cancer (GDSC) database.
The main utilities are the Drug classes and the Experiment class. All classes beginning with the word "Drug" represent a compound
coming from GDSC. There is a separate class for every corresponding experiment setup and genomic feature space. All Drug
classes contain methods for extraction and storage of proper input data. Available data types include: gene expression, binary copy number and coding variants, and cell line tissue type. The set of considered genes is represented as "targets"
attribute of Drug classes.
The Experiment class is dedicated to the storage and analysis of results coming from machine learning experiments. Actual
machine learning is done outside of the class. The Experiment class has methods for the storage, analysis and visualisation
of results.
Classes:
Drug: Basic class representing a compound from GDSC.
DrugWithDrugBank: Inherits from Drug, accounts for target genes from DrugBank database.
    DrugGenomeWide: Inherits from Drug, designed for using genome-wide gene expression as input data.
DrugDirectReactome: Inherits from DrugWithDrugBank, uses only input data related to target genes resulting
from direct compound-pathway matching from Reactome.
DrugWithGenesInSamePathways: Inherits from DrugWithDrugBank, uses only input data related to genes that belong in
the same pathways as target genes.
Experiment: Designed to store and analyze results coming from machine learning experiments.
"""
# Imports
import pandas as pd
import numpy as np
import time
import pickle
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr
import collections
# Sklearn imports
from scipy.stats import pearsonr
from sklearn.linear_model import ElasticNet
from sklearn import model_selection
from sklearn import metrics
from sklearn import preprocessing
from sklearn.dummy import DummyRegressor
from sklearn.pipeline import Pipeline
from sklearn import feature_selection
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.base import clone
# General imports
import multiprocessing
import numpy as np
import pandas as pd
import time
import sys
import dill
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr
import collections
# Sklearn imports
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn import model_selection
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Lasso, ElasticNet
from stability_selection import StabilitySelection
#################################################################################################################
# Drug class
#################################################################################################################
class Drug(object):
"""Class representing compound from GDSC database.
This is the most basic, parent class. Different experimental settings will use more specific,
children classes. Main function of the class is to create and store input data corresponding to a given
drug. Five types of data are considered: gene expression, copy number variants, coding variants, gene expression
signatures, and tumor tissue type. Class instances are initialized with four basic drug properties: ID, name, gene
targets and target pathway. Data attributes are stored as pandas DataFrames and are filled using data files
from GDSC via corresponding methods.
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
        ensembl_targets (list of strings): Drug's target genes' Ensembl IDs. Can have a different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering only
target genes. Created after calling the "extract_gene_expression" method
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
        cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_cnv_data_faster: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
extract_merck_signatures_data: Generate a DataFrame with gene expression signatures provided by Merck.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data and saves it in corresponding instance's
field.
return_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data but does not save it.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
create_drugs: Create a dictionary of Drug class objects, each referenced by it's ID
(keys are drug GDSC ID's)
load_data: Load all needed data files as DataFrames with one function call.
"""
# Class variables
map_from_hgnc_to_ensembl = None
map_from_ensembl_to_hgnc = None
# Instance methods
def __init__(self, gdsc_id, name, targets, target_pathway):
"""Intiliaze the class instance with four basic attributes. "Targets" are gene names
and get mapped into Ensembl IDs using class mapping variable."""
self.gdsc_id = gdsc_id
self.name = name
self.targets = targets
self.target_pathway = target_pathway
self.ensembl_targets = []
for x in self.targets:
try:
self.ensembl_targets.append(self.map_from_hgnc_to_ensembl[x])
except KeyError:
pass
def extract_drug_response_data(self, sensitivity_profiles_df, metric="AUC"):
"""Generate a DataFrame containing reponses for every cell line screened for that drug.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
None
"""
df = sensitivity_profiles_df[sensitivity_profiles_df.DRUG_ID == self.gdsc_id][
["COSMIC_ID", metric]]
df.columns = ["cell_line_id", metric] # Insert column with samples ID
self.total_no_samples_screened = df.shape[0] # Record how many screened cell lines for drug
self.response_data = df # Put DataFrame into corresponding field
def extract_screened_cell_lines(self, sensitivity_profiles_df):
"""Generate set of cell lines screened for that drug.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
Returns:
None
"""
self.screened_cell_lines = list(
sensitivity_profiles_df[sensitivity_profiles_df.DRUG_ID == self.gdsc_id]["COSMIC_ID"])
def extract_gene_expression(self, gene_expression_df):
"""Generate DataFrame of gene expression data for cell lines screened for this drug, only
considering drug's target genes.
Arguments:
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
Returns:
None
"""
        cell_lines_str = [] # Gene expression DF column names are strings
for x in self.screened_cell_lines:
cell_lines_str.append(str(x))
cl_to_extract = []
for x in cell_lines_str:
if x in list(gene_expression_df.columns):
cl_to_extract.append(x) # Extract only cell lines contained in gene expression data
gene_expr = gene_expression_df[
gene_expression_df.ensembl_gene.isin(self.ensembl_targets)][["ensembl_gene"] + cl_to_extract]
gene_expr_t = gene_expr.transpose()
columns = list(gene_expr_t.loc["ensembl_gene"])
gene_expr_t.columns = columns
gene_expr_t = gene_expr_t.drop(["ensembl_gene"])
rows = list(gene_expr_t.index)
gene_expr_t.insert(0, "cell_line_id", rows) # Insert columns with cell line IDs
gene_expr_t.reset_index(drop=True, inplace=True)
gene_expr_t["cell_line_id"] = pd.to_numeric(gene_expr_t["cell_line_id"])
self.gene_expression_data = gene_expr_t # Put DataFrame into corresponding field
def extract_mutation_data(self, mutation_df):
"""Generate a DataFrame with binary mutation calls for screened cell lines and target genes.
Arguments:
mutation_df: DataFrame with original mutation calls from GDSC.
Returns:
None
"""
targets = [x + "_mut" for x in self.targets]
df = mutation_df.copy()[
mutation_df.cosmic_sample_id.isin(self.screened_cell_lines)]
df = df[df.genetic_feature.isin(targets)][["cosmic_sample_id", "genetic_feature", "is_mutated"]]
cosmic_ids = []
genetic_features = {}
for feature in df.genetic_feature.unique():
genetic_features[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
df_cl = df[df.cosmic_sample_id == cl_id]
for feature in genetic_features:
mutation_status = df_cl[
df_cl.genetic_feature == feature]["is_mutated"].iloc[0]
genetic_features[feature].append(mutation_status)
df1 = pd.DataFrame()
df1.insert(0, "cell_line_id", cosmic_ids) # Insert column with samples IDs
for feature in genetic_features:
df1[feature] = genetic_features[feature]
self.mutation_data = df1 # Put DataFrame into corresponding field
def extract_cnv_data(self, cnv_binary_df):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Arguments:
cnv_binary_df: DataFrame from GDSC download tool with CNV data.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
for row in cnv_binary_df.drop_duplicates(subset="genetic_feature").itertuples():
feature_name = getattr(row, "genetic_feature")
genes_in_segment = getattr(row, "genes_in_segment").split(",")
for target in self.targets:
if target in genes_in_segment:
features_to_extract.append(feature_name) # If target is in any segment, add it to the list
features_to_extract = list(set(features_to_extract))
df = df[df.genetic_feature.isin(features_to_extract)]
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in df.genetic_feature.unique():
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = df[
(df.cosmic_sample_id == cl_id) & (df.genetic_feature == feature)]["is_mutated"].iloc[0]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
def extract_cnv_data_faster(self, cnv_binary_df, map_cl_id_and_feature_to_status):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Faster implementation than original "extract_cnv_data" by using mapping between genes and
genomic segments.
Arguments:
cnv_binary_df: DataFrame from GDSC download tool with CNV data.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
for row in cnv_binary_df.drop_duplicates(subset="genetic_feature").itertuples():
feature_name = getattr(row, "genetic_feature")
genes_in_segment = getattr(row, "genes_in_segment").split(",")
for target in self.targets:
if target in genes_in_segment:
features_to_extract.append(feature_name) # If target is in any segment, add it to the list
features_to_extract = list(set(features_to_extract))
df = df[df.genetic_feature.isin(features_to_extract)]
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in features_to_extract:
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = map_cl_id_and_feature_to_status[(cl_id, feature)]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
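    # Note on "map_cl_id_and_feature_to_status" used above: judging from its usage,
    # it is expected to be a plain dict keyed by (cosmic_sample_id, genetic_feature)
    # tuples holding the binary "is_mutated" status, e.g. (values are made up):
    #
    #     {(683665, "cnaPANCAN123"): 1, (683665, "cnaPANCAN124"): 0}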
def extract_tissue_data(self, cell_line_list):
"""Generate (dummy encoded) data with cell line tissue type.
Arguments:
cell_line_list (DataFrame): Cell line list from GDSC.
Returns:
None
"""
df = cell_line_list[
cell_line_list["COSMIC_ID"].isin(self.screened_cell_lines)][["COSMIC_ID", "Tissue"]]
df.rename(columns={"COSMIC_ID": "cell_line_id"}, inplace=True)
self.tissue_data = pd.get_dummies(df, columns = ["Tissue"])
def extract_merck_signatures_data(self, signatures_df):
"""Generate data with gene expression signature scores for GDSC cell lines, provided by Merck.
Arguments:
signatures_df (DataFrame): DataFrame with gene signatures for cell lines.
Returns:
None
"""
# Compute list of screened cell lines as strings with prefix "X" in order to match
# signatures DataFrame columns
cell_lines_str = ["X" + str(cl) for cl in self.screened_cell_lines]
# Compute list of cell lines that are contained in signatures data
cls_to_extract = [cl for cl in cell_lines_str
if cl in list(signatures_df.columns)]
# Extract desired subset of signatures data
signatures_of_interest = signatures_df[cls_to_extract]
# Transpose the DataFrame
signatures_t = signatures_of_interest.transpose()
# Create a list of cell line IDs whose format matches rest of the data
cl_ids = pd.Series(signatures_t.index).apply(lambda x: int(x[1:]))
# Insert proper cell line IDs as a new column
signatures_t.insert(0, "cell_line_id", list(cl_ids))
# Drop the index and put computed DataFrame in an instance field
self.merck_signatures = signatures_t.reset_index(drop=True)
def concatenate_data(self, data_combination):
"""Generate data containing chosen combination of genetic data classes.
Arguments:
data_combination: List of strings containing data classes to be included. Available options are:
"mutation", "expression", "CNV", "tissue", "merck signatures".
Returns:
None
"""
# Create a list of DataFrames to include
objects = [self.response_data]
if "mutation" in data_combination and self.mutation_data.shape[0] > 0:
objects.append(self.mutation_data)
if "expression" in data_combination and self.gene_expression_data.shape[0] > 0:
objects.append(self.gene_expression_data)
if "CNV" in data_combination and self.cnv_data.shape[0] > 0:
objects.append(self.cnv_data)
if "tissue" in data_combination and self.tissue_data.shape[0] > 0:
objects.append(self.tissue_data)
if "merck signatures" in data_combination and self.merck_signatures.shape[0] > 0:
objects.append(self.merck_signatures)
# Find intersection in cell lines for all desirable DataFrames
cl_intersection = set(list(self.response_data["cell_line_id"]))
for obj in objects:
cl_intersection = cl_intersection.intersection(set(list(obj["cell_line_id"])))
objects_common = []
for obj in objects:
objects_common.append(obj[obj["cell_line_id"].isin(cl_intersection)])
# Check if all DataFrames have the same number of samples
no_samples = objects_common[0].shape[0]
for obj in objects_common:
assert obj.shape[0] == no_samples
obj.sort_values("cell_line_id", inplace=True)
obj.reset_index(drop=True, inplace=True)
cl_ids = objects_common[0]["cell_line_id"]
df_concatenated = pd.concat(objects_common, axis=1, ignore_index=False)
metric = self.response_data.columns[-1] # Extract the name of metric which was used for sensitivity
sensitivities = df_concatenated[metric]
df_concatenated = df_concatenated.drop(["cell_line_id", metric], axis=1)
df_concatenated.insert(0, "cell_line_id", cl_ids)
df_concatenated.insert(df_concatenated.shape[1], metric, sensitivities)
self.full_data = df_concatenated
def create_full_data(self, sensitivity_profiles_df, gene_expression_df=None, cnv_binary_df=None,
map_cl_id_and_feature_to_status=None,
cell_line_list=None, mutation_df=None, merck_signatures_df=None,
data_combination=None, metric="AUC"):
"""Combine extraction methods in one to generate a DataFrame with desired data.
When calling a function, original DataFrames parsed should match strings in
data_combination argument. If any of the "_df" arguments is None (default value),
the corresponding data is not included in the output DataFrame.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
cnv_binary_df (DataFrame): DataFrame from GDSC download tool with CNV data.
cell_line_list (DataFrame): Cell line list from GDSC.
mutation_df (DataFrame): DataFrame with original mutation calls from GDSC.
data_combination (list): list of strings containing data classes to be included. Available
options are: "mutation", "expression", "CNV, "tissue", "merck signatures".
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
DataFrame containing desired data for the drug
"""
# Call separate methods for distinct data types
self.extract_screened_cell_lines(sensitivity_profiles_df)
self.extract_drug_response_data(sensitivity_profiles_df, metric)
if type(gene_expression_df) == type(pd.DataFrame()):
self.extract_gene_expression(gene_expression_df)
if type(cnv_binary_df) == type(pd.DataFrame()):
self.extract_cnv_data_faster(cnv_binary_df, map_cl_id_and_feature_to_status)
if type(cell_line_list) == type(pd.DataFrame()):
self.extract_tissue_data(cell_line_list)
if type(mutation_df) == type(pd.DataFrame()):
self.extract_mutation_data(mutation_df)
if type(merck_signatures_df) == type(pd.DataFrame()):
self.extract_merck_signatures_data(merck_signatures_df)
self.concatenate_data(data_combination)
return self.full_data
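    # Example call (illustrative only; the DataFrames are the GDSC tables
    # described in the docstring, loaded elsewhere):
    #
    #     full_df = drug.create_full_data(drug_response_df,
    #                                     gene_expression_df=gene_expression_df,
    #                                     mutation_df=coding_variants_df,
    #                                     data_combination=["expression", "mutation"],
    #                                     metric="AUC")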
def return_full_data(self, sensitivity_profiles_df, gene_expression_df=None, cnv_binary_df=None,
map_cl_id_and_feature_to_status=None,
cell_line_list=None, mutation_df=None, merck_signatures_df=None,
data_combination=None, metric="AUC"):
"""Compute full data with desired data classes and return it, but after that delete data from
instance's data fields in order to save memory.
When calling a function, original DataFrames parsed should match strings in
data_combination argument. If any of the "_df" arguments is None (default value),
the corresponding data is not included in the output DataFrame.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
cnv_binary_df (DataFrame): DataFrame from GDSC download tool with CNV data.
cell_line_list (DataFrame): Cell line list from GDSC.
mutation_df (DataFrame): DataFrame with original mutation calls from GDSC.
data_combination (list): list of strings containing data classes to be included. Available
options are: "mutation", "expression", "CNV, "tissue", "merck signatures".
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
DataFrame containing desired data for the drug
"""
full_df = self.create_full_data(sensitivity_profiles_df, gene_expression_df, cnv_binary_df,
map_cl_id_and_feature_to_status,
cell_line_list, mutation_df, merck_signatures_df,
data_combination, metric)
if type(gene_expression_df) == type(pd.DataFrame()):
self.gene_expression_data = None
if type(cnv_binary_df) == type(pd.DataFrame()):
self.cnv_data = None
if type(cell_line_list) == type(pd.DataFrame()):
self.tissue_data = None
if type(mutation_df) == type(pd.DataFrame()):
self.mutation_data = None
if type(merck_signatures_df) == type(pd.DataFrame()):
self.merck_signatures = None
self.full_data = None
return full_df
def __repr__(self):
"""Return string representation of an object, which can be used to create it."""
return 'Drug({}, "{}", {}, "{}")'.format(self.gdsc_id, self.name, self.targets, self.target_pathway)
def __str__(self):
"""Return string representation of an object"""
return "{} -- {}".format(self.name, self.gdsc_id)
# Class methods
@classmethod
def load_mappings(cls, filepath_hgnc_to_ensembl, filepath_ensembl_to_hgnc):
"""Load dictonaries with gene mappings between HGNC and Ensembl (from pickle files) and assign it
to corresponding class variables. Ensembl IDs are needed for gene expression data.
This method should be called on a Drug class before any other actions with the class.
Arguments:
filepath_hgnc_to_ensembl: file with accurate mapping
filepath_ensembl_to_hgnc: file with accurate mapping
Returns:
None
"""
cls.map_from_hgnc_to_ensembl = pickle.load(open(filepath_hgnc_to_ensembl, "rb"))
cls.map_from_ensembl_to_hgnc = pickle.load(open(filepath_ensembl_to_hgnc, "rb"))
# Static methods
@staticmethod
def create_drugs(drug_annotations_df):
"""Create a dictionary of Drug class objects, each referenced by it's ID (keys are drug GDSC ID's).
Arguments:
drug_annotations_df (DataFrame): DataFrame of drug annotations from GDSC website
Returns:
Dictionary of Drug objects as values and their ID's as keys
"""
drugs = {}
for row in drug_annotations_df.itertuples(index=True, name="Pandas"):
gdsc_id = getattr(row, "DRUG_ID")
name = getattr(row, "DRUG_NAME")
targets = getattr(row, "TARGET").split(", ")
target_pathway = getattr(row, "TARGET_PATHWAY")
drugs[gdsc_id] = Drug(gdsc_id, name, targets, target_pathway)
return drugs
@staticmethod
def load_data(drug_annotations, cell_line_list, gene_expr, cnv1, cnv2,
coding_variants, drug_response):
"""Load all needed files by calling one function and return data as tuple of DataFrames. All
        arguments are filepaths to corresponding files."""
# Drug annotations
drug_annotations_df = pd.read_excel(drug_annotations)
# Cell line annotations
col_names = ["Name", "COSMIC_ID", "TCGA classification", "Tissue", "Tissue_subtype", "Count"]
cell_lines_list_df = pd.read_csv(cell_line_list, usecols=[1, 2, 3, 4, 5, 6], header=0, names=col_names)
# Gene expression
gene_expression_df = pd.read_table(gene_expr)
# CNV
d1 = pd.read_csv(cnv1)
d2 = pd.read_table(cnv2)
d2.columns = ["genes_in_segment"]
def f(s):
return s.strip(",")
cnv_binary_df = d1.copy()
cnv_binary_df["genes_in_segment"] = d2["genes_in_segment"].apply(f)
# Coding variants
coding_variants_df = pd.read_csv(coding_variants)
# Drug-response
        drug_response_df = pd.read_excel(drug_response)
from itertools import combinations
import itertools
from random import *
import random
import pdb
from lifelines.utils import concordance_index
from sklearn import preprocessing
import functools
import random
import time
import pandas as pd
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import torch
import torch.nn as nn
from models.gat import GATNet
from models.gat_gcn import GAT_GCN
from models.gcn import GCNNet
from models.ginconv import GINConvNet
from utils import *
from processing import process_data
import argparse
print = functools.partial(print, flush=True)
def group_by(data):
"""
group documents by query-id
    :param data: iterable of query ids, one entry per document (row)
    :return: dict mapping each query id to the list of row indices belonging to it
"""
qid_doc_map = {}
idx = 0
for record in data:
qid_doc_map.setdefault(record, [])
qid_doc_map[record].append(idx)
idx += 1
return qid_doc_map
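# For illustration (not executed): group_by maps each query id to the row
# indices at which it occurs, e.g.
#
#     group_by(["t1", "t1", "t2"])  ->  {"t1": [0, 1], "t2": [2]}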
def sample_index(pairs,sampling_method = None):
'''
pairs: the score pairs for train or test
return:
index of x1 and x2
'''
x1_index = []
x2_index = []
for i_data in pairs:
if sampling_method == '500 times':
sampled_data = pd.DataFrame(i_data).sample(n=500,replace=True)
if sampling_method == None:
sampled_data = pd.DataFrame(i_data)
x1_index.append(sampled_data.iloc[:,0].values)
x2_index.append(sampled_data.iloc[:,1].values)
return x1_index, x2_index
def get_pairs(scores,K,eps=0.2,seed=0):
"""
    sample candidate document pairs for one query, keeping only pairs whose scores differ by more than eps.
:param scores: given score list of documents for a particular query
:param K: times of sampling
    :return: list of tuples (i, sampled_j, score_i - score_j, len(scores))
"""
pairs = []
random.seed(seed)
for i in range(len(scores)):
#for j in range(len(scores)):
# sampling K times
for _ in range(K):
idx = random.randint(0, len(scores) - 1)
score_diff = float(scores[i]) - float(scores[idx])
if abs(score_diff) > eps:
pairs.append((i, idx, score_diff, len(scores)))
return pairs
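# For illustration (not executed): with scores [0.1, 0.9, 0.5], K=1 and eps=0.2,
# get_pairs may return tuples such as (0, 1, -0.8, 3), i.e.
# (doc index, sampled doc index, score difference, number of docs); sampled
# pairs with |score difference| <= eps are discarded.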
def split_pairs(order_pairs, true_scores):
"""
    split the pairs into two lists, named relevant_doc and irrelevant_doc.
    relevant_doc[i] is prior to irrelevant_doc[i]
    :param order_pairs: ordered pairs of all queries
    :param true_scores: scores of docs for each query
    :return: relevant_doc, irrelevant_doc, score_diff and N_smiles (one entry per pair)
"""
relevant_doc = []
irrelevant_doc = []
score_diff = []
N_smiles = []
doc_idx_base = 0
query_num = len(order_pairs)
for i in range(query_num):
pair_num = len(order_pairs[i])
docs_num = len(true_scores[i])
for j in range(pair_num):
d1, d2, score, N = order_pairs[i][j]
d1 += doc_idx_base
d2 += doc_idx_base
relevant_doc.append(d1)
irrelevant_doc.append(d2)
score_diff.append(score)
N_smiles.append(N)
doc_idx_base += docs_num
return relevant_doc, irrelevant_doc, score_diff, N_smiles
def filter_pairs(data,order_paris,threshold):
    # filter out pairs whose absolute score difference is below the threshold
order_paris_filtered = []
for i_pairs in order_paris:
pairs1_score = data[pd.DataFrame(i_pairs).iloc[:,0].values][:,1].astype('float32')
pairs2_score = data[pd.DataFrame(i_pairs).iloc[:,1].values][:,1].astype('float32')
# filtered |score|<threshold
score = pairs1_score-pairs2_score
temp_mask = abs(score) > threshold # 0.2 threshold
i_pairs_filtered = np.array(i_pairs)[temp_mask].tolist()
if len(i_pairs_filtered)>0:
order_paris_filtered.append(i_pairs_filtered)
return order_paris_filtered
class hinge_loss(nn.Module):
def __init__(self,threshold=1,weight=None):
super().__init__()
        self.threshold = threshold
self.weight = weight
def forward(self,predicted_score,true_score,n = None):
# score_diff = predicted_score - true_score
score_diff = predicted_score*true_score
loss = self.threshold - score_diff
loss = torch.clip(loss,min=0)
loss = torch.square(loss)
if not self.weight is None:
loss = loss * self.weight
return 0.5*loss.mean()
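# For reference, hinge_loss implements a squared hinge on the product of the
# predicted and true (signed) scores:
#
#     L = 0.5 * mean(weight * clip(threshold - y_pred * y_true, min=0) ** 2)
#
# e.g. with the defaults, hinge_loss()(torch.tensor([0.2]), torch.tensor([1.0]))
# gives 0.5 * 0.8**2 = 0.32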
def sample_pairs(true_scores,K,eps,seed):
# get all the pairs after filtering based on scores
order_paris = []
for scores in true_scores:
order_paris.append(get_pairs(scores,K=K,eps=eps,seed=seed))
x1_index, x2_index, train_scores, N_smiles = split_pairs(order_paris ,true_scores)
    print('Number of training pairs is {}'.format(len(x1_index)))
# change labels to binary
Y = np.array(train_scores).astype('float32')
Y[Y<0] = 0
Y[Y>0] = 1
return x1_index, x2_index, train_scores, Y
def distributed_concat(tensor, num_total_examples):
output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0) # truncate the dummy elements added by SequentialDistributedSampler
return concat[:num_total_examples]
def model_eval(model,val_dataloader,device):
model.eval()
## validation
CI_list = []
weighted_CI_list = []
weights_len = []
with torch.no_grad():
for batch_id, data in enumerate(val_dataloader):
i_target_len = len(data)
i_target_pred_scores = []
i_target_y_label = []
# loop over all the D-T pairs in one group(T group)
for i_data in data:
i_data = i_data.to(device)
pred_scores = model.forward_single(i_data)
# get the predicted labels
i_target_pred_scores.append(pred_scores.cpu().numpy()[0])
# get the true labels
i_target_y_label.append(i_data.y.cpu().numpy()[0])
i_target_pred_scores = np.array(i_target_pred_scores)
i_target_y_label = np.array(i_target_y_label)
# compute CI
CI = concordance_index(i_target_y_label,i_target_pred_scores)
CI_list.append(CI)
weighted_CI_list.append(i_target_len*CI)
weights_len.append(i_target_len)
average_CI = np.mean(CI_list)
weighted_CI = np.sum(weighted_CI_list)/np.sum(weights_len)
return average_CI, weighted_CI
def dist_run(rank, args, world_size, train_set,mixed_set,val_set,test_set,model,CV):
dist.init_process_group('nccl', rank=rank, world_size=world_size)
print(rank)
# prepare the processed data
#train
train_t = train_set[2]
train_d = train_set[1]
train_groups = train_set[0]
train_y = train_set[3]
train_smiles_graph = train_set[4]
if args.is_mixed:
#mixed
mixed_t = mixed_set[2]
mixed_d = mixed_set[1]
mixed_groups = mixed_set[0]
mixed_y = mixed_set[3]
mixed_smiles_graph = mixed_set[4]
del train_set
del mixed_set
# val
val_t = val_set[2]
val_d = val_set[1]
val_groups = val_set[0]
val_y = val_set[3]
val_smiles_graph = val_set[4]
# test
test_t = test_set[2]
test_d = test_set[1]
test_groups = test_set[0]
test_y = test_set[3]
test_smiles_graph = test_set[4]
##################### load the data ############################
if args.is_mixed:
# concatenate the data
train_t_data = np.concatenate((train_t,mixed_t))
train_d_data = np.concatenate((train_d,mixed_d))
train_smiles_graph_data = {**train_smiles_graph, **mixed_smiles_graph}
else:
train_t_data = train_t
train_d_data = train_d
train_smiles_graph_data = train_smiles_graph
# get the group
qid_doc_map_train = group_by(train_groups)
query_idx_train = qid_doc_map_train.keys()
train_keys = np.array(list(query_idx_train))
if args.is_mixed:
id_doc_map_mixed = group_by(mixed_groups)
query_idx_mixed = id_doc_map_mixed.keys()
mixed_keys = np.array(list(query_idx_mixed))
qid_doc_map_val = group_by(val_groups)
query_idx_val = qid_doc_map_val.keys()
val_keys = np.array(list(query_idx_val))
qid_doc_map_test = group_by(test_groups)
query_idx_test = qid_doc_map_test.keys()
test_keys = np.array(list(query_idx_test))
###### get the protein group and index for train/val/test
# get the true scores of train
true_scores = [train_y[qid_doc_map_train[qid]] for qid in query_idx_train]
if args.is_mixed:
true_scores_mixed = [mixed_y[id_doc_map_mixed[qid]] for qid in query_idx_mixed]
# ###### get val/test dataloader
val_index = []
for qid in val_keys:
val_index.append(qid_doc_map_val[qid])
val_dataset = TestDataset(test_index=val_index,xd=val_d,xt=val_t,y=val_y,smile_graph=val_smiles_graph)
val_dataloader = DataLoader(val_dataset, batch_size=args.test_batch_size,shuffle=False)
test_index = []
for qid in test_keys:
test_index.append(qid_doc_map_test[qid])
test_dataset = TestDataset(test_index=test_index,xd=test_d,xt=test_t,y=test_y,smile_graph=test_smiles_graph)
test_dataloader = DataLoader(test_dataset, batch_size=args.test_batch_size,shuffle=False)
###### load model
model = model.to(rank)
model_dist = DistributedDataParallel(model, device_ids=[rank], find_unused_parameters=True)
# define the optimizer
optimizer = torch.optim.Adam(model_dist.parameters(), lr=args.learning_rate)
print('start to train the model...')
for epoch in range(args.N_epoch):
##################### resampling the pairs for each epoch #####################
start_time = time.time()
train_x1_index, train_x2_index, train_scores, Y_train = sample_pairs(true_scores,K=args.sampling_N_train,eps=args.filter_threshold,seed=epoch)
if args.is_mixed:
mixed_x1_index, mixed_x2_index, mixed_scores, Y_mixed = sample_pairs(true_scores_mixed,K=args.sampling_N_mixed,eps=args.filter_threshold,seed=epoch)
# mixed all pairs from train and mixed dataset
len_train = len(train_x1_index)
onehot_train = np.zeros(len_train)
if args.is_mixed:
len_mixed1 = len(mixed_x1_index)
onehot_mixed = np.ones(len_mixed1)
onehot_train_mixed = np.concatenate((onehot_train,onehot_mixed))
else:
onehot_train_mixed = onehot_train
if args.is_mixed:
temp = len(train_d)
mixed_x1_index = [i + temp for i in mixed_x1_index]
mixed_x2_index = [i + temp for i in mixed_x2_index]
train_x1_index = train_x1_index + mixed_x1_index
train_x2_index = train_x2_index + mixed_x2_index
Y_train_data = np.concatenate((Y_train,Y_mixed))
else:
Y_train_data = Y_train
# get dataloader
train_dataset = TrainDataset(train_x1_index=train_x1_index,train_x2_index=train_x2_index,train_d=train_d_data, train_t=train_t_data, y=Y_train_data,onehot_train_mixed=onehot_train_mixed,smile_graph=train_smiles_graph_data)
train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True)
train_dataloader = DataLoader(train_dataset, batch_size=args.train_batch_size,sampler=train_sampler)
end_time = time.time()
print('make pairs + sampling, take time {}'.format(end_time-start_time))
##################### resampling the pairs for each epoch #####################
print('***************train')
LOSS = []
model.train()
start_time = time.time()
for batch_id, data in enumerate(train_dataloader):
data1 = data[0].to(rank)
data2 = data[1].to(rank)
batch_train_mixed = data1.train_mixed
optimizer.zero_grad()
output = model_dist(data1,data2)
            true_labels = data1.y.view(-1, 1).float()
            output_train = output[batch_train_mixed==0]
            output_mixed = output[batch_train_mixed==1]
            true_labels_train = true_labels[batch_train_mixed==0]
            true_labels_test = true_labels[batch_train_mixed==1]
            ###### define loss and optimization function
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(output, true_labels)
loss.backward()
optimizer.step()
if batch_id % 20 == 0:
print('batch {} loss {}'.format(batch_id,loss.item()))
LOSS.append(loss.cpu().detach().numpy())
end_time = time.time()
print('take time {}'.format(end_time-start_time))
print('epoch {}: loss: {} '.format(epoch,np.mean(LOSS)))
if rank == 0:
# validation
print('***************validation')
val_average_CI, val_weighted_CI = model_eval(model,val_dataloader,device='cuda:0')
print("val_Average CI is {}".format(val_average_CI))
print("val_weighted CI is {}".format(val_weighted_CI))
# test
print('***************test')
test_average_CI, test_weighted_CI = model_eval(model,test_dataloader,device='cuda:0')
print("test_Average CI is {}".format(test_average_CI))
print("test_weighted CI is {}".format(test_weighted_CI))
if epoch == 0:
best_average_CI = val_average_CI
# save the best epoch
torch.save(model.state_dict(), args.save_direct + CV + '_' + 'train_model_best' )
with open(args.save_direct + CV + '_' + "best_results.txt", "w") as text_file:
text_file.write('epoch {}: loss: {} '.format(epoch,np.mean(LOSS)) + '\n')
text_file.write("val Average CI is {}".format(val_average_CI) + '\n')
text_file.write("val weighted CI is {}".format(val_weighted_CI) + '\n')
text_file.write("test Average CI is {}".format(test_average_CI) + '\n')
text_file.write("test weighted CI is {}".format(test_weighted_CI) + '\n')
text_file.write('##############################################' + '\n')
if (epoch != 0) & (val_average_CI >= best_average_CI):
best_average_CI = val_average_CI
# save the best epoch
torch.save(model.state_dict(), args.save_direct + CV + '_' + 'train_model_best' )
with open(args.save_direct + CV + '_' + "best_results.txt", "w") as text_file:
text_file.write('epoch {}: loss: {} '.format(epoch,np.mean(LOSS)) + '\n')
text_file.write("val Average CI is {}".format(val_average_CI) + '\n')
text_file.write("val weighted CI is {}".format(val_weighted_CI) + '\n')
text_file.write("test Average CI is {}".format(test_average_CI) + '\n')
text_file.write("test weighted CI is {}".format(test_weighted_CI) + '\n')
text_file.write('##############################################' + '\n')
def run(args):
print('Load data...')
###### load model
model = eval(args.model_name)()
CVs = ['CV1','CV2','CV3','CV4','CV5']
data_path = args.data_path + args.dataset + '/'
for CV in CVs:
print('><<><><><><><><><><><><><><><><><><><><><><><><><<><><><><><>')
print('start {}'.format(CV))
##################### load the data ############################
train_file = CV + '_' + args.dataset + '_' + args.split +'_' + 'train' + '.csv'
val_file = CV + '_' + args.dataset + '_' + args.split + '_' + 'val' + '.csv'
test = 'test_' + args.dataset + '_' + args.split + '.csv'
# load the data
train_data = pd.read_csv(data_path + CV + '/' + train_file)
val_data = pd.read_csv(data_path + CV + '/' + val_file)
test_data = | pd.read_csv(data_path + test) | pandas.read_csv |
"""
Use info from TEF sections to calculate values for the
Geyer and MacCready 2014 parameter space diagram.
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pandas as pd
from datetime import datetime, timedelta
import netCDF4 as nc
import os; import sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zrfun
import zfun
import tef_fun
import flux_fun
from time import time
from warnings import filterwarnings
filterwarnings('ignore') # skip some warning messages
# associated with lines like QQp[QQ<=0] = np.nan
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gridname', type=str, default='cas6')
parser.add_argument('-t', '--tag', type=str, default='v3')
parser.add_argument('-x', '--ex_name', type=str, default='lo8b')
parser.add_argument('-y', '--year', type=int, default=2017)
args = parser.parse_args()
year_str = str(args.year)
# Get Ldir
Ldir = Lfun.Lstart(args.gridname, args.tag)
gtagex = args.gridname + '_' + args.tag + '_' + args.ex_name
# select input/output location
run_name = gtagex+'_'+year_str+'.01.01_'+year_str+'.12.31'
indir00 = Ldir['LOo'] + 'tef2/'
indir0 = indir00 + run_name + '/'
indir = indir0 + 'flux/'
x_indir = indir0 + 'extractions/'
outdir = indir00 + 'sill_dyn_plots/'
Lfun.make_dir(outdir)
# get section definitions
sect_df = tef_fun.get_sect_df()
testing = False
if testing:
sect_list = ['ai1']
else:
sect_list = list(sect_df.index)
# initialize DataFrame
q_df = pd.DataFrame(index=sect_list,
    columns=['Ut', 'Qprism','H', 'Qr','Ur', 'M', 'Fr', 'Qe', 'Ue', 'DS', 'Sbar', 'c', 'Ue_non', 'DS_non'])
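# For reference, the Geyer & MacCready (2014) parameter-space axes are, in the
# notation the column names above appear to follow (H depth, Ut tidal velocity
# amplitude, Ur river velocity):
#   freshwater Froude number  Fr = Ur / sqrt(beta * g * s_ocean * H)
#   mixing parameter          M  = sqrt(Cd * Ut**2 / (omega * N0 * H**2)),
# with N0 = sqrt(beta * g * s_ocean / H).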
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_index_multi_index(self):
# GH#25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
)
result = df.set_index(list("abc")).sort_index(level=list("ba"))
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
)
expected = expected.set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered["A"])
df = unordered.copy()
return_value = df.sort_index(inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
assert a_id != id(df["A"])
df = unordered.copy()
return_value = df.sort_index(ascending=False, inplace=True)
assert return_value is None
expected = frame[::-1]
tm.assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ["D", "B", "C", "A"]]
df = unordered.copy()
return_value = df.sort_index(axis=1, inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
df = unordered.copy()
return_value = df.sort_index(axis=1, ascending=False, inplace=True)
assert return_value is None
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
# test with multiindex, too
idf = df.set_index(["A", "B"])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
tm.assert_frame_equal(result, expected)
# also, Series!
result = idf["C"].sort_index(ascending=[1, 0])
tm.assert_series_equal(result, expected["C"])
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level="A", sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["A", "B"], sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
# Error thrown by sort_index when
# first index is sorted last (GH#26053)
result = df.sort_index(level=["C", "B", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["B", "C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
def test_sort_index_categorical_index(self):
df = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
def test_sort_index(self):
# GH#13496
frame = DataFrame(
np.arange(16).reshape(4, 4),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
tm.assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
tm.assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
tm.assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
# GH#13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples(
[[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
)
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
)
expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
result = df.sort_index(level=level)
tm.assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
)
expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
tm.assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
)
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114
original_index = [2, 5, 3]
df = DataFrame(original_dict, index=original_index)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
False,
MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")),
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
False,
MultiIndex.from_tuples([[3, 4], [2, 1]], names=list("AB")),
),
],
)
def test_sort_index_ignore_index_multi_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
        # GH 30114, this is to test ignore_index on a MultiIndex for the index
mi = MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB"))
df = DataFrame(original_dict, index=mi)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=mi))
def test_sort_index_categorical_multiindex(self):
# GH#15058
df = DataFrame(
{
"a": range(6),
"l1": pd.Categorical(
["a", "a", "b", "b", "c", "c"],
categories=["c", "a", "b"],
ordered=True,
),
"l2": [0, 1, 0, 1, 0, 1],
}
)
result = df.set_index(["l1", "l2"]).sort_index()
expected = DataFrame(
[4, 5, 0, 1, 2, 3],
columns=["a"],
index=MultiIndex(
levels=[
CategoricalIndex(
["c", "a", "b"],
categories=["c", "a", "b"],
ordered=True,
name="l1",
dtype="category",
),
[0, 1],
],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=["l1", "l2"],
),
)
tm.assert_frame_equal(result, expected)
def test_sort_index_and_reconstruction(self):
# GH#15622
# lexsortedness should be identical
# across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list("ab"))
expected = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples(
[(0.5, "a"), (0.5, "b"), (0.8, "a"), (0.8, "b")]
),
)
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list("ab")]),
)
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex(
levels=[[0.5, 0.8], ["a", "b"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = result.sort_index()
assert result.index.is_lexsorted()
tm.assert_frame_equal(result, expected)
concatted = pd.concat([df, df], keys=[0.8, 0.5])
result = concatted.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# GH#14015
df = DataFrame(
[[1, 2], [6, 7]],
columns=MultiIndex.from_tuples(
[(0, "20160811 12:00:00"), (0, "20160809 12:00:00")],
names=["l1", "Date"],
),
)
df.columns = df.columns.set_levels(
pd.to_datetime(df.columns.levels[1]), level=1
)
assert not df.columns.is_lexsorted()
assert not df.columns.is_monotonic
result = df.sort_index(axis=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
result = df.sort_index(axis=1, level=1)
assert result.columns.is_lexsorted()
assert result.columns.is_monotonic
# TODO: better name, de-duplicate with test_sort_index_level above
def test_sort_index_level2(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
df = frame.copy()
df.index = np.arange(len(df))
# axis=1
# series
a_sorted = frame["A"].sort_index(level=0)
# preserve names
assert a_sorted.index.names == frame.index.names
# inplace
rs = frame.copy()
return_value = rs.sort_index(level=0, inplace=True)
assert return_value is None
tm.assert_frame_equal(rs, frame.sort_index(level=0))
def test_sort_index_level_large_cardinality(self):
# GH#2684 (int64)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int64)
# it works!
result = df.sort_index(level=0)
assert result.index.lexsort_depth == 3
# GH#2684 (int32)
index = MultiIndex.from_arrays([np.arange(4000)] * 3)
df = DataFrame(np.random.randn(4000), index=index, dtype=np.int32)
# it works!
result = df.sort_index(level=0)
assert (result.dtypes.values == df.dtypes.values).all()
assert result.index.lexsort_depth == 3
def test_sort_index_level_by_name(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
frame.index.names = ["first", "second"]
result = frame.sort_index(level="second")
expected = frame.sort_index(level=1)
tm.assert_frame_equal(result, expected)
def test_sort_index_level_mixed(self):
mi = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=mi,
columns=Index(["A", "B", "C"], name="exp"),
)
sorted_before = frame.sort_index(level=1)
df = frame.copy()
df["foo"] = "bar"
sorted_after = df.sort_index(level=1)
tm.assert_frame_equal(sorted_before, sorted_after.drop(["foo"], axis=1))
dft = frame.T
sorted_before = dft.sort_index(level=1, axis=1)
dft["foo", "three"] = "bar"
sorted_after = dft.sort_index(level=1, axis=1)
tm.assert_frame_equal(
sorted_before.drop([("foo", "three")], axis=1),
sorted_after.drop([("foo", "three")], axis=1),
)
def test_sort_index_preserve_levels(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.sort_index()
assert result.index.names == frame.index.names
@pytest.mark.parametrize(
"gen,extra",
[
([1.0, 3.0, 2.0, 5.0], 4.0),
([1, 3, 2, 5], 4),
(
[
                    Timestamp("20130101"),
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook theta-model.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # The Theta Model
#
# The Theta model of Assimakopoulos & Nikolopoulos (2000) is a simple
# method for forecasting that involves fitting two $\theta$-lines,
# forecasting the lines using a Simple Exponential Smoother, and then
# combining the forecasts from the two lines to produce the final forecast.
# The model is implemented in steps:
#
#
# 1. Test for seasonality
# 2. Deseasonalize if seasonality detected
# 3. Estimate $\alpha$ by fitting a SES model to the data and $b_0$ by
# OLS.
# 4. Forecast the series
# 5. Reseasonalize if the data was deseasonalized.
#
# The seasonality test examines the ACF at the seasonal lag $m$. If this
# lag is significantly different from zero then the data are deseasonalized
# using `statsmodels.tsa.seasonal_decompose` with either a multiplicative
# method (the default) or an additive one.
#
# The parameters of the model are $b_0$ and $\alpha$ where $b_0$ is
# estimated from the OLS regression
#
# $$
# X_t = a_0 + b_0 (t-1) + \epsilon_t
# $$
#
# and $\alpha$ is the SES smoothing parameter in
#
# $$
# \tilde{X}_t = (1-\alpha) X_t + \alpha \tilde{X}_{t-1}
# $$
#
# The forecasts are then
#
# $$
# \hat{X}_{T+h|T} = \frac{\theta-1}{\theta} \hat{b}_0
# \left[h - 1 + \frac{1}{\hat{\alpha}}
# - \frac{(1-\hat{\alpha})^T}{\hat{\alpha}} \right]
# + \tilde{X}_{T+h|T}
# $$
#
# Ultimately $\theta$ only plays a role in determining how much the trend
# is damped. If $\theta$ is very large, then the forecast of the model is
# identical to that from an Integrated Moving Average with a drift,
#
# $$
# X_t = X_{t-1} + b_0 + (\alpha-1)\epsilon_{t-1} + \epsilon_t.
# $$
#
# Finally, the forecasts are reseasonalized if needed.
#
# This module is based on:
#
# * <NAME>., & <NAME>. (2000). The theta model: a
# decomposition
# approach to forecasting. International journal of forecasting, 16(4),
# 521-530.
# * <NAME>., & <NAME>. (2003). Unmasking the Theta method.
# International Journal of Forecasting, 19(2), 287-290.
# * <NAME>., <NAME>., <NAME>., & <NAME>.
# (2015). The optimized theta method. arXiv preprint arXiv:1503.03529.
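# ----------------------------------------------------------------------------
# A minimal illustrative sketch (not part of the original notebook): it shows
# how the drift $b_0$ from the OLS regression above can be estimated directly
# with NumPy on a toy series. The names `toy_x`, `b0_hat` and `a0_hat` are
# made up here purely for illustration; `ThetaModel` below performs this
# estimation (together with the SES fit for $\alpha$) internally.
import numpy as np

toy_x = np.array([10.2, 10.8, 11.1, 11.9, 12.4, 13.0])  # hypothetical series
t = np.arange(1, len(toy_x) + 1)
# OLS fit of X_t = a_0 + b_0 (t - 1); np.polyfit returns [slope, intercept]
b0_hat, a0_hat = np.polyfit(t - 1, toy_x, deg=1)
print(f"estimated drift b_0 = {b0_hat:.3f}, intercept a_0 = {a0_hat:.3f}")
# ----------------------------------------------------------------------------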
# ## Imports
#
# We start with the standard set of imports and some tweaks to the default
# matplotlib style.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader as pdr
import seaborn as sns
plt.rc("figure", figsize=(16, 8))
plt.rc("font", size=15)
plt.rc("lines", linewidth=3)
sns.set_style("darkgrid")
# ## Load some Data
#
# We will first look at housing starts using US data. This series is
# clearly seasonal but does not have a clear trend over the same period.
reader = pdr.fred.FredReader(["HOUST"], start="1980-01-01", end="2020-04-01")
data = reader.read()
housing = data.HOUST
housing.index.freq = housing.index.inferred_freq
ax = housing.plot()
# We first specify the model without any options and fit it. The summary
# shows that the data was deseasonalized using the multiplicative method.
# The drift is modest and negative, and the smoothing parameter is fairly
# low.
from statsmodels.tsa.forecasting.theta import ThetaModel
tm = ThetaModel(housing)
res = tm.fit()
print(res.summary())
# The model is first and foremost a forecasting method. Forecasts are
# produced using the `forecast` method of the fitted model. Below we produce a
# hedgehog plot by forecasting 2 years ahead every 2 years.
#
# **Note**: the default $\theta$ is 2.
forecasts = {"housing": housing}
for year in range(1995, 2020, 2):
sub = housing[:str(year)]
res = ThetaModel(sub).fit()
fcast = res.forecast(24)
forecasts[str(year)] = fcast
forecasts = pd.DataFrame(forecasts)
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_collect-data.ipynb (unless otherwise specified).
__all__ = ['query_lexeme', 'query_subr', 'query_subr_year', 'get_results', 'conv_results_to_df', 'comm_subr_to_csv',
'get_subr_year']
# Cell
from psaw import PushshiftAPI
from tqdm import tqdm
import datetime as dt
import json
import pandas as pd
import os
# Cell
def query_lexeme(lex, year):
api = PushshiftAPI()
gen = api.search_comments(
q = lex,
after = int(dt.datetime(year, 1, 1).timestamp()),
before = int(dt.datetime(year, 12, 31).timestamp())
)
return gen
# Cell
def query_subr(subreddit):
api = PushshiftAPI()
gen = api.search_comments(
subreddit=subreddit
)
return gen
# Cell
def query_subr_year(subreddit, year):
api = PushshiftAPI()
gen = api.search_comments(
subreddit = subreddit,
after = int(dt.datetime(int(year), 1, 1).timestamp()),
before = int(dt.datetime(int(year), 12, 31).timestamp())
)
return gen
# Cell
def get_results(gen, limit):
cache = []
for c in tqdm(gen, total=limit):
cache.append(c)
if len(cache) >= limit:
break
return cache
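# Example usage (illustrative sketch; assumes network access to the Pushshift
# API and that the subreddit name below exists):
#
#   gen = query_subr_year("python", 2019)
#   comments = get_results(gen, limit=100)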
# Cell
def conv_results_to_df(results):
    df = pd.DataFrame([thing.d_ for thing in results])
    return df
# coding: utf-8
# In[1]:
import pandas as pd
import os
import matplotlib.pyplot as plt
import re
import numpy as np
import pandas as pd
from scipy.stats import mode
from nltk import skipgrams
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import itertools
import lightgbm as lgb
from lightgbm import LGBMClassifier
from sklearn.model_selection import cross_val_score, RandomizedSearchCV
from sklearn.metrics import accuracy_score
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn import decomposition, ensemble
from sklearn.pipeline import Pipeline
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier
from rgf.sklearn import FastRGFClassifier
from sklearn.model_selection import GridSearchCV
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
SEED = 42
join = os.path.join
# In[68]:
data = pd.read_csv('Devex_train.csv', encoding="latin-1")
# In[72]:
df_train = pd.read_csv('Devex_train.csv', low_memory=False, encoding='latin1')
df_submission = pd.read_csv('Devex_submission_format.csv', low_memory=False, encoding='latin1')
import os
from collections import deque
import pandas as pd
import numpy as np
from crawling.models.instagram import Instagram
if __name__ == "__main__":
# load influencer data
    df = pd.read_csv("data/influencer_dataframe.csv", sep=';', index_col=0)
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
tm.assert_series_equal(result, expected)
def test_sort_index_nan(self):
# GH#3917
# Test DataFrame with nan label
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=True, na_position="last")
expected = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position="first")
expected = DataFrame(
{"A": [4, 1, 2, np.nan, 1, 6, 8], "B": [5, 9, np.nan, 5, 2, 5, 4]},
index=[np.nan, 1, 2, 3, 4, 5, 6],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind="quicksort", ascending=False)
expected = DataFrame(
{"A": [8, 6, 1, np.nan, 2, 1, 4], "B": [4, 5, 2, 5, np.nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, np.nan],
)
tm.assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind="quicksort", ascending=False, na_position="first"
)
expected = DataFrame(
{"A": [4, 8, 6, 1, np.nan, 2, 1], "B": [5, 4, 5, 2, 5, np.nan, 9]},
index=[np.nan, 6, 5, 4, 3, 2, 1],
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_index_multi_index(self):
# GH#25775, testing that sorting by index works with a multi-index.
df = DataFrame(
{"a": [3, 1, 2], "b": [0, 0, 0], "c": [0, 1, 2], "d": list("abc")}
)
result = df.set_index(list("abc")).sort_index(level=list("ba"))
expected = DataFrame(
{"a": [1, 2, 3], "b": [0, 0, 0], "c": [1, 2, 0], "d": list("bca")}
)
expected = expected.set_index(list("abc"))
tm.assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(
np.random.randn(4, 4), index=[1, 2, 3, 4], columns=["A", "B", "C", "D"]
)
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered["A"])
df = unordered.copy()
return_value = df.sort_index(inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
assert a_id != id(df["A"])
df = unordered.copy()
return_value = df.sort_index(ascending=False, inplace=True)
assert return_value is None
expected = frame[::-1]
tm.assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ["D", "B", "C", "A"]]
df = unordered.copy()
return_value = df.sort_index(axis=1, inplace=True)
assert return_value is None
expected = frame
tm.assert_frame_equal(df, expected)
df = unordered.copy()
return_value = df.sort_index(axis=1, ascending=False, inplace=True)
assert return_value is None
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({"A": A, "B": B, "C": np.random.randn(100)})
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
# test with multiindex, too
idf = df.set_index(["A", "B"])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
tm.assert_frame_equal(result, expected)
# also, Series!
result = idf["C"].sort_index(ascending=[1, 0])
tm.assert_series_equal(result, expected["C"])
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
df = DataFrame([[1, 2], [3, 4]], mi)
result = df.sort_index(level="A", sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["A", "B"], sort_remaining=False)
expected = df
tm.assert_frame_equal(result, expected)
# Error thrown by sort_index when
# first index is sorted last (GH#26053)
result = df.sort_index(level=["C", "B", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["B", "C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(level=["C", "A"])
expected = df.iloc[[1, 0]]
tm.assert_frame_equal(result, expected)
def test_sort_index_categorical_index(self):
df = DataFrame(
{
"A": np.arange(6, dtype="int64"),
"B": Series(list("aabbca")).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.sort_index(ascending=False)
expected = df.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_frame_equal(result, expected)
def test_sort_index(self):
# GH#13496
frame = DataFrame(
np.arange(16).reshape(4, 4),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
# axis=0 : sort rows by index labels
unordered = frame.loc[[3, 2, 4, 1]]
result = unordered.sort_index(axis=0)
expected = frame
tm.assert_frame_equal(result, expected)
result = unordered.sort_index(ascending=False)
expected = frame[::-1]
tm.assert_frame_equal(result, expected)
# axis=1 : sort columns by column names
unordered = frame.iloc[:, [2, 1, 3, 0]]
result = unordered.sort_index(axis=1)
tm.assert_frame_equal(result, frame)
result = unordered.sort_index(axis=1, ascending=False)
expected = frame.iloc[:, ::-1]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", ["A", 0]) # GH#21052
def test_sort_index_multiindex(self, level):
# GH#13496
# sort rows by specified level of multi-index
mi = MultiIndex.from_tuples(
[[2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list("ABC")
)
df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi)
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 2], [2, 1, 3]], names=list("ABC")
)
expected = DataFrame([[5, 6], [3, 4], [1, 2]], index=expected_mi)
result = df.sort_index(level=level)
tm.assert_frame_equal(result, expected)
# sort_remaining=False
expected_mi = MultiIndex.from_tuples(
[[1, 1, 1], [2, 1, 3], [2, 1, 2]], names=list("ABC")
)
expected = DataFrame([[5, 6], [1, 2], [3, 4]], index=expected_mi)
result = df.sort_index(level=level, sort_remaining=False)
tm.assert_frame_equal(result, expected)
def test_sort_index_intervalindex(self):
# this is a de-facto sort via unstack
# confirming that we sort in the order of the bins
y = Series(np.random.randn(100))
x1 = Series(np.sign(np.random.randn(100)))
x2 = pd.cut(Series(np.random.randn(100)), bins=[-3, -0.5, 0, 0.5, 3])
model = pd.concat([y, x1, x2], axis=1, keys=["Y", "X1", "X2"])
result = model.groupby(["X1", "X2"], observed=True).mean().unstack()
expected = IntervalIndex.from_tuples(
[(-3.0, -0.5), (-0.5, 0.0), (0.0, 0.5), (0.5, 3.0)], closed="right"
)
result = result.columns.levels[1].categories
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, True, [0, 1, 2]),
({"A": [1, 2, 3]}, {"A": [2, 3, 1]}, False, False, [5, 3, 2]),
({"A": [1, 2, 3]}, {"A": [1, 3, 2]}, True, False, [2, 3, 5]),
],
)
def test_sort_index_ignore_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
# GH 30114
original_index = [2, 5, 3]
df = DataFrame(original_dict, index=original_index)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=original_index))
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ascending, ignore_index, output_index",
[
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
True,
[0, 1],
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [1, 2], "M2": [3, 4]},
True,
False,
MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB")),
),
(
{"M1": [1, 2], "M2": [3, 4]},
{"M1": [2, 1], "M2": [4, 3]},
False,
False,
MultiIndex.from_tuples([[3, 4], [2, 1]], names=list("AB")),
),
],
)
def test_sort_index_ignore_index_multi_index(
self, inplace, original_dict, sorted_dict, ascending, ignore_index, output_index
):
        # GH 30114, this is to test ignore_index on a MultiIndex
mi = MultiIndex.from_tuples([[2, 1], [3, 4]], names=list("AB"))
df = DataFrame(original_dict, index=mi)
expected_df = DataFrame(sorted_dict, index=output_index)
kwargs = {
"ascending": ascending,
"ignore_index": ignore_index,
"inplace": inplace,
}
if inplace:
result_df = df.copy()
result_df.sort_index(**kwargs)
else:
result_df = df.sort_index(**kwargs)
tm.assert_frame_equal(result_df, expected_df)
tm.assert_frame_equal(df, DataFrame(original_dict, index=mi))
def test_sort_index_categorical_multiindex(self):
# GH#15058
df = DataFrame(
{
"a": range(6),
"l1": pd.Categorical(
["a", "a", "b", "b", "c", "c"],
categories=["c", "a", "b"],
ordered=True,
),
"l2": [0, 1, 0, 1, 0, 1],
}
)
result = df.set_index(["l1", "l2"]).sort_index()
expected = DataFrame(
[4, 5, 0, 1, 2, 3],
columns=["a"],
index=MultiIndex(
levels=[
CategoricalIndex(
["c", "a", "b"],
categories=["c", "a", "b"],
ordered=True,
name="l1",
dtype="category",
),
[0, 1],
],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=["l1", "l2"],
),
)
tm.assert_frame_equal(result, expected)
def test_sort_index_and_reconstruction(self):
# GH#15622
# lexsortedness should be identical
# across MultiIndex construction methods
df = DataFrame([[1, 1], [2, 2]], index=list("ab"))
expected = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_tuples(
[(0.5, "a"), (0.5, "b"), (0.8, "a"), (0.8, "b")]
),
)
assert expected.index.is_lexsorted()
result = DataFrame(
[[1, 1], [2, 2], [1, 1], [2, 2]],
index=MultiIndex.from_product([[0.5, 0.8], list("ab")]),
)
result = result.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
        tm.assert_frame_equal(result, expected)
'''
Runs simple machine learning models along with all their model metrics
'''
import pandas as pd
import os
import glob
import numpy as np
def balance_classes(a, b):
extra = max(a,b) - min(a,b)
remove = int(extra / 8)
return remove
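# Example (illustrative): balance_classes(100, 60) returns int((100 - 60) / 8) == 5,
# i.e. one eighth of the class-size surplus, rounded down.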
path = os.getcwd()
os.chdir(path+'/non_decomp')
non_files = glob.glob('*.csv')
non_frames = {}
non_size = 0
for n in non_files:
    df = pd.read_csv(n)
from math import radians, cos, sin, asin, sqrt
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import numpy as np
from datetime import date, timedelta
from pandas.tseries.offsets import DateOffset
from math import radians, cos, sin, asin, sqrt
import folium
import plotly.graph_objects as go
import json
import requests
SPD_data = pd.read_csv('sample_2018_2019.csv',delimiter = ',')
SPD_data.sort_values(by='Report DateTime', ascending = True, inplace = True)
SPD_data['coordinates'] = SPD_data[['Latitude', 'Longitude']].values.tolist()
SPD_data = SPD_data.iloc[:100000,:]
def crimes_in_radius_dates(coord, radius, start_date, end_date):
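    # Keep the rows of SPD_data whose report date falls between start_date and
    # end_date (inclusive) and whose (Latitude, Longitude) pair lies within
    # `radius` miles of `coord`.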
df = SPD_data
df['Report DateTime']=pd.to_datetime(df['Report DateTime']).dt.date
date_mask = (pd.to_datetime(df['Report DateTime']) >= start_date) & (pd.to_datetime(df['Report DateTime']) <= end_date)
dff = df[date_mask]
result = [point_in_radius(value[0],value[1],coord[0],coord[1],radius)
for value in dff['coordinates']]
return dff[result]
def point_in_radius(lat1, lon1, lat2, lon2, radius):
# """
# Calculate the great circle distance between two points
# on the earth (specified in decimal degrees)
# """
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
r = 3956 # Radius of earth in kilometers. Use 3956 for miles
if c*r<=int(radius):
return True
else:
return False
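# Example (illustrative): downtown Seattle and the Space Needle are roughly a
# mile apart, so a 2-mile radius should match:
#   point_in_radius(47.6062, -122.3321, 47.6205, -122.3493, 2)  # -> True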
def address_to_coord(address_string):
result = address_string.replace(' ','+')
query = f'https://nominatim.openstreetmap.org/search?q={result}&format=geojson'
    response = requests.get(query)
return(response.json())
def crime_marker(coord,category,map):
colors = {'PROPERTY':'Blue','PERSON':'Red','SOCIETY':'#009933'}
feature_property = folium.FeatureGroup('PROPERTY')
feature_person = folium.FeatureGroup('PERSON')
feature_society = folium.FeatureGroup('SOCIETY')
group = {'PROPERTY':feature_property,'PERSON':feature_person,'SOCIETY':feature_society}
for x, y in zip(coord, category):
folium.CircleMarker(
location = x,
radius = 3,
popup = y,
color = colors[y],
fill = True,
fill_color = colors[y]
).add_to(group[y])
for key in group.keys():
group[key].add_to(map)
def crime_table(data,type, start, end):
df =data[data['Crime Against Category'] == type].sort_values('Report DateTime', ascending = True)
#df['date']=pd.to_datetime(df['Report DateTime']).dt.date
date_mask = (pd.to_datetime(df['Report DateTime']) >= start) & (pd.to_datetime(df['Report DateTime']) <= end)
return df[date_mask].groupby('Offense').count()['Report Number'].sort_values(ascending = False).reset_index()
def crime_trend_data(data,type, end_date):
df =data[data['Crime Against Category'] == type].sort_values('Report DateTime', ascending = True)
    date_mask = (pd.to_datetime(df['Report DateTime']) <= end_date)
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : ML Studio #
# Version : 0.1.0 #
# File : regression.py #
# Python : 3.8.3 #
# -------------------------------------------------------------------------- #
# Author : <NAME> #
# Company : DecisionScients #
# Email : <EMAIL> #
# URL : https://github.com/decisionscients/MLStudio #
# -------------------------------------------------------------------------- #
# Created : Friday, April 10th 2020, 3:27:23 pm #
# Last Modified : Wednesday, June 10th 2020, 9:11:49 pm #
# Modified By : <NAME> (<EMAIL>) #
# -------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2020 DecisionScients #
# =========================================================================== #
#%%
from collections import OrderedDict
import os
from pathlib import Path
import sys
homedir = str(Path(__file__).parents[4])
demodir = str(Path(__file__).parents[2])
datadir = os.path.join(demodir, "data")
sys.path.append(homedir)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.datasets import make_regression
from sklearn.linear_model import SGDRegressor, LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.pipeline import Pipeline
from mlstudio.supervised.machine_learning.gradient_descent import GDRegressor
from mlstudio.supervised.algorithms.optimization.services.loss import Quadratic
from mlstudio.supervised.algorithms.optimization.services.regularizers import L1, L2, L1_L2
from mlstudio.supervised.metrics.regression import R2
from mlstudio.supervised.algorithms.optimization.observers.early_stop import EarlyStop
from mlstudio.utils.data_manager import StandardScaler, data_split
from mlstudio.utils.file_manager import save_fig
def get_data():
X, y = make_regression(n_samples=100, n_features=10,
n_informative=5, bias=0.5, effective_rank=8,
noise=1.0, random_state=5)
scaler = StandardScaler()
X = scaler.fit_transform(X)
return X, y
def get_sgdregressor_results(X,y, algorithm):
params = {'penalty': ['l1', 'l2', 'elasticnet'],
'alpha': [0.001, 0.01, 0.1],
'l1_ratio': [0.5, 0.25, 0.15],
'random_state': [50],
'learning_rate': ['constant', 'optimal', 'invscaling', 'adaptive'],
'eta0': [0.1, 0.01, 0.001,]}
estimator = SGDRegressor()
clf = GridSearchCV(estimator, params, scoring='r2')
clf.fit(X, y)
# Create scores dataframe
scores = OrderedDict()
scores['Algorithm'] = algorithm
scores[r'$R^2$']= [max(0,x) for x in clf.cv_results_['mean_test_score']]
scores = pd.DataFrame.from_dict(scores)
# Create times dataframe
times = OrderedDict()
times['Algorithm'] = algorithm
times['Fit Times (secs)']= clf.cv_results_['mean_fit_time']
times = pd.DataFrame.from_dict(times)
return scores, times
def get_mlstudio_results(X, y, algorithm, batch_size):
params = {'eta0': [0.1, 0.01, 0.001],
'observers': [[EarlyStop(mode='active', metric='val_score', epsilon=0.01)]],
'objective': [Quadratic(regularizer=L1(alpha=0.001)),
Quadratic(regularizer=L1(alpha=0.01)),
Quadratic(regularizer=L1(alpha=0.1)),
Quadratic(regularizer=L2(alpha=0.001)),
Quadratic(regularizer=L2(alpha=0.01)),
Quadratic(regularizer=L2(alpha=0.1)),
Quadratic(regularizer=L1_L2(alpha=0.001, ratio=0.5)),
Quadratic(regularizer=L1_L2(alpha=0.001, ratio=0.25)),
Quadratic(regularizer=L1_L2(alpha=0.001, ratio=0.15)),
Quadratic(regularizer=L1_L2(alpha=0.01, ratio=0.5)),
Quadratic(regularizer=L1_L2(alpha=0.01, ratio=0.25)),
Quadratic(regularizer=L1_L2(alpha=0.01, ratio=0.15)),
Quadratic(regularizer=L1_L2(alpha=0.1, ratio=0.5)),
Quadratic(regularizer=L1_L2(alpha=0.1, ratio=0.25)),
Quadratic(regularizer=L1_L2(alpha=0.1, ratio=0.15))],
'epochs': [1000], 'batch_size': batch_size,
'random_state' : [50]}
estimator = GDRegressor()
clf = GridSearchCV(estimator, params, scoring='r2')
clf.fit(X,y)
# Create scores dataframe
scores = OrderedDict()
scores['Algorithm'] = algorithm
scores[r'$R^2$']= [max(0,x) for x in clf.cv_results_['mean_test_score']]
    scores = pd.DataFrame.from_dict(scores)
import pandas as pd
import numpy as np
__all__ = [
"read_raw_data",
"read_processed_data",
"read_processed_fillna_data",
"change_dtype",
"map_variable",
"merge_df",
"reduce_memory_usage",
"remove_normal_outlier"
]
def read_raw_data(
logger, data_dir, index_col_name=None, train=True, test=True, sample_submission=True
):
"""Read all the different data files
"""
logger.info(f"Reading Data from {data_dir}...")
train_df = None
test_df = None
sample_submission_df = None
if train:
train_df = pd.read_csv(f"{data_dir}/train.csv", index_col=index_col_name)
logger.info(f"Shape of train_df : {train_df.shape}")
if test:
        test_df = pd.read_csv(f"{data_dir}/test.csv", index_col=index_col_name)
import string
import numpy as np
import re
import random
import pandas as pd
import os
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
label_encoder = LabelEncoder()
def clean_str_new(s):
"""
Adapted from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py for Italian
"""
s = re.sub(r"\'s", "", s)
s = re.sub(r"\'ve", "have", s)
s = re.sub(r"n\'t", " not", s)
s = re.sub(r"\'re", " are", s)
s = re.sub(r"\'d", " would", s)
s = re.sub(r"\'ll", " will", s)
s = re.sub(r"\'", " ", s)
punc = re.compile('[%s]' % re.escape(string.punctuation))
s = punc.sub('', s) # removes punctuation, not accents
DIGITS = re.compile("[0-9]", re.UNICODE)
s = DIGITS.sub("#", s)
s = re.sub(r"\s{2,}", " ", s)
s = s.lower()
s = s.strip()
return s
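# Example (illustrative):
#   clean_str_new("Hello, World! It's 2023.")  # -> "hello world it ####"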
def remove_double_spaces(s):
s = re.sub(r"\s{2,}", " ", s)
return s
s = '<NAME>or, is from 3.2. But'
def clean_str(s):
"""
Tokenization/s cleaning for all datasets except for SST.
Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
s = re.sub(r"[^\\p{L}\\s]", " ", s) # This removes accents, which we want.
s = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", s) #This removes accents, which we want.
s = re.sub(r"\'s", "", s)
s = re.sub(r"\'ve", "have", s)
s = re.sub(r"n\'t", " not", s)
s = re.sub(r"\'re", " are", s)
s = re.sub(r"\'d", " would", s)
s = re.sub(r"\'ll", " will", s)
s = re.sub(r",", "", s) #s = re.sub(r",", " ,", s)
s = re.sub(r"!", "", s)
# s = re.sub(r"\(", "\(", s)
# s = re.sub(r"\)", "\) ", s)
s = re.sub(r"\?", "", s)
s = re.sub(r"\s{2,}", " ", s)
s = re.sub(r" ", " ", s)
return s.strip().lower()
def load_output_layers(path_to_dir):
'''These are the output_layers of the Xvalidation set, 100 sentences per 130 categories. Good for RSMs'''
loaded = np.load(path_to_dir+'output_layers.npz')
layers = []
layer1= pd.DataFrame(loaded['a'])
layers.append(layer1)
layer2 = pd.DataFrame(loaded['b'])
layers.append(layer2)
try:
layer3 = pd.DataFrame(loaded['c'])
layers.append(layer3)
except: pass
try:
layer4 = pd.DataFrame(loaded['d'])
layers.append(layer4)
except: pass
try:
layer5 = pd.DataFrame(loaded['e'])
layers.append(layer5)
except: pass
try:
layer6 = pd.DataFrame(loaded['f'])
layers.append(layer6)
except: pass
try:
layer7 = pd.DataFrame(loaded['g'])
layers.append(layer7)
except: pass
try:
layer8 = pd.DataFrame(loaded['h'])
layers.append(layer8)
except: pass
try:
layer9 = pd.DataFrame(loaded['i'])
layers.append(layer9)
except: pass
return np.array(layers)
import codecs
def load_data(train_path,categories,shuffle=False,one_hot=False):
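    # For each category, read `<category>.txt` from train_path, clean every
    # line with clean_str_new, and collect integer class labels (optionally
    # one-hot encoded and shuffled); returns the flattened sentence list and
    # the label list.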
labels = []
x_text_list = []
class_number = 0
for filename in categories:
with codecs.open(train_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text1 = [line.strip() for line in f]
x_text1 = [clean_str_new(sent) for sent in x_text1]
x_text_list.append(x_text1)
labels.append(np.full(len(x_text1), class_number, dtype=np.int).tolist())
class_number += 1
print(str(class_number) + ' classes added')
x_text = [item for sublist in x_text_list for item in sublist]
# one hot vectors for labels
labels = [item for sublist in labels for item in sublist]
if one_hot:
a = np.array(labels)
b = np.zeros((len(labels), len(categories)), dtype=int)
b[np.arange(len(labels)), a] = 1
y = b.tolist() # to shuffle it
else:
y = labels[:]
if shuffle:
combined = list(zip(x_text, y))
random.shuffle(combined)
x_text[:], y[:] = zip(*combined)
# y = np.asarray(y)
return [x_text, y]
# def load_data_uppercase(train_path,categories,shuffle=False,one_hot=False):
# labels = []
# x_text_list = []
# class_number = 0
# for filename in categories:
# with codecs.open(train_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
# x_text1 = [line.strip() for line in f]
# x_text1 = [clean_str_uppercase(sent) for sent in x_text1]
# x_text_list.append(x_text1)
# labels.append(np.full(len(x_text1), class_number, dtype=np.int).tolist())
# class_number += 1
# print(str(class_number) + ' classes added')
# x_text = [item for sublist in x_text_list for item in sublist]
# # one hot vectors for labels
# labels = [item for sublist in labels for item in sublist]
# if one_hot:
# a = np.array(labels)
# b = np.zeros((len(labels), len(categories)), dtype=int)
# b[np.arange(len(labels)), a] = 1
# y = b.tolist() # to shuffle it
# else:
# y = labels[:]
# if shuffle:
# combined = list(zip(x_text, y))
# random.shuffle(combined)
# x_text[:], y[:] = zip(*combined)
# # y = np.asarray(y)
# return [x_text, y]
def load_data_raw(train_path, categories, shuffle=False, one_hot=False):
labels = []
x_text_list = []
class_number = 0
for filename in categories:
with codecs.open(train_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text1 = [line.strip() for line in f]
x_text1 = [remove_double_spaces(sent) for sent in x_text1]
x_text_list.append(x_text1)
labels.append(np.full(len(x_text1), class_number, dtype=np.int).tolist())
class_number += 1
print(str(class_number) + ' classes added')
x_text = [item for sublist in x_text_list for item in sublist]
# one hot vectors for labels
labels = [item for sublist in labels for item in sublist]
if one_hot:
a = np.array(labels)
b = np.zeros((len(labels), len(categories)), dtype=int)
b[np.arange(len(labels)), a] = 1
y = b.tolist() # to shuffle it
else:
y = labels[:]
if shuffle:
combined = list(zip(x_text, y))
random.shuffle(combined)
x_text[:], y[:] = zip(*combined)
# y = np.asarray(y)
return [x_text, y]
def load_whole_dataset_raw(train_path,validation_path, test_path,categories,load_all=True, shuffle=False,one_hot=False):
labels = []
x_text_list = []
class_number = 0
for filename in categories:
with codecs.open(train_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text1 = [line.strip() for line in f]
x_text1 = [remove_double_spaces(sent) for sent in x_text1]
x_text_list.append(x_text1)
labels.append(np.full(len(x_text1), class_number, dtype=np.int).tolist())
if load_all:
with codecs.open(validation_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text2 = [line.strip() for line in f]
x_text2 = [remove_double_spaces(sent) for sent in x_text2]
x_text_list.append(x_text2)
with codecs.open(test_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text3 = [line.strip() for line in f]
x_text3 = [remove_double_spaces(sent) for sent in x_text3]
x_text_list.append(x_text3)
labels.append(np.full(len(x_text2), class_number, dtype=np.int).tolist())
labels.append(np.full(len(x_text3), class_number, dtype=np.int).tolist())
class_number += 1
print(str(class_number) + ' classes added')
x_text = [item for sublist in x_text_list for item in sublist]
# one hot vectors for labels
labels = [item for sublist in labels for item in sublist]
if one_hot:
a = np.array(labels)
b = np.zeros((len(labels), len(categories)), dtype=int)
b[np.arange(len(labels)), a] = 1
y = b.tolist() # to shuffle it
else:
y = labels[:]
if shuffle:
combined = list(zip(x_text, y))
random.shuffle(combined)
x_text[:], y[:] = zip(*combined)
# y = np.asarray(y)
return [x_text, y]
def load_whole_dataset(train_path,validation_path, test_path,categories,load_all=True, shuffle=False,one_hot=False):
labels = []
x_text_list = []
class_number = 0
for filename in categories:
with codecs.open(train_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text1 = [line.strip() for line in f]
x_text1 = [clean_str_new(sent) for sent in x_text1]
x_text_list.append(x_text1)
labels.append(np.full(len(x_text1), class_number, dtype=np.int).tolist())
if load_all:
with codecs.open(validation_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text2 = [line.strip() for line in f]
x_text2 = [clean_str_new(sent) for sent in x_text2]
x_text_list.append(x_text2)
with codecs.open(test_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text3 = [line.strip() for line in f]
x_text3 = [clean_str_new(sent) for sent in x_text3]
x_text_list.append(x_text3)
labels.append(np.full(len(x_text2), class_number, dtype=np.int).tolist())
labels.append(np.full(len(x_text3), class_number, dtype=np.int).tolist())
class_number += 1
print(str(class_number) + ' classes added')
x_text = [item for sublist in x_text_list for item in sublist]
# one hot vectors for labels
labels = [item for sublist in labels for item in sublist]
if one_hot:
a = np.array(labels)
b = np.zeros((len(labels), len(categories)), dtype=int)
b[np.arange(len(labels)), a] = 1
y = b.tolist() # to shuffle it
else:
y = labels[:]
if shuffle:
combined = list(zip(x_text, y))
random.shuffle(combined)
x_text[:], y[:] = zip(*combined)
# y = np.asarray(y)
return [x_text, y]
# The preprocessing
def load_all_data(train_path,validation_path, categories,shuffle=False,one_hot=False):
labels = []
x_text_list = []
class_number = 0
for filename in categories:
with codecs.open(train_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text1 = [line.strip() for line in f]
x_text1 = [clean_str_new(sent) for sent in x_text1]
x_text_list.append(x_text1)
with codecs.open(validation_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text2 = [line.strip() for line in f]
x_text2 = [clean_str_new(sent) for sent in x_text2]
x_text_list.append(x_text2)
labels.append(np.full(len(x_text1), class_number, dtype=np.int).tolist())
labels.append(np.full(len(x_text2), class_number, dtype=np.int).tolist())
class_number += 1
print(str(class_number) + ' classes added')
x_text = [item for sublist in x_text_list for item in sublist]
# one hot vectors for labels
labels = [item for sublist in labels for item in sublist]
if one_hot:
a = np.array(labels)
b = np.zeros((len(labels), len(categories)), dtype=int)
b[np.arange(len(labels)), a] = 1
y = b.tolist() # to shuffle it
else:
y = labels[:]
if shuffle:
combined = list(zip(x_text, y))
random.shuffle(combined)
x_text[:], y[:] = zip(*combined)
# y = np.asarray(y)
return [x_text, y]
def load_train_test(train_path,validation_path, test_path, categories,shuffle=False,one_hot=False):
labels = []
x_text_list = []
class_number = 0
for filename in categories:
with codecs.open(train_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text1 = [line.strip() for line in f]
x_text1 = [clean_str_new(sent) for sent in x_text1]
x_text_list.append(x_text1)
labels.append(np.full(len(x_text1), class_number, dtype=np.int).tolist())
with codecs.open(validation_path + filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text2 = [line.strip() for line in f]
x_text2 = [clean_str_new(sent) for sent in x_text2]
x_text_list.append(x_text2)
with codecs.open(test_path+ filename + '.txt', "r", encoding='utf-8', errors='ignore') as f:
x_text3 = [line.strip() for line in f]
x_text3 = [clean_str_new(sent) for sent in x_text3]
x_text_list.append(x_text3)
labels.append(np.full(len(x_text2), class_number, dtype=np.int).tolist())
labels.append(np.full(len(x_text3), class_number, dtype=np.int).tolist())
class_number += 1
print(str(class_number) + ' classes added')
x_text = [item for sublist in x_text_list for item in sublist]
# one hot vectors for labels
labels = [item for sublist in labels for item in sublist]
if one_hot:
a = np.array(labels)
b = np.zeros((len(labels), len(categories)), dtype=int)
b[np.arange(len(labels)), a] = 1
y = b.tolist() # to shuffle it
else:
y = labels[:]
if shuffle:
combined = list(zip(x_text, y))
random.shuffle(combined)
x_text[:], y[:] = zip(*combined)
# y = np.asarray(y)
return [x_text, y]
def load_sentences(load_sentences_from, input_dir):
# call file 'sentences'
if load_sentences_from == 'manually':
# Just write them here
sentences0 = ['I am a boy.', 'I am a boy', 'I am a girl']
elif load_sentences_from == 'txt':
# Load from txt
with open(input_dir + 'sentences.txt', 'r') as f:
sentences0 = f.read().splitlines()
elif load_sentences_from == 'csv':
# Load from csv.
sentences0 = np.array(pd.read_csv(input_dir + 'sentences.csv', header=None)).flatten()
return sentences0
# Preprocess/clean text
# def encode1(Ytrain):
# integer_encoded = label_encoder.fit_transform(Ytrain)
# # binary encode
# onehot_encoder = OneHotEncoder(sparse=False)
# integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
# Ytrain_integer = [item for sublist in integer_encoded for item in sublist]
# Ytrain_encoded = onehot_encoder.fit_transform(integer_encoded)
# return Ytrain_encoded, Ytrain_integer
def encode_sentences(Xtrain, sentences, sequence_length=26):
# Turn words into IDs based on training data
tokenizer = Tokenizer()
tokenizer.fit_on_texts(Xtrain)
sequences = tokenizer.texts_to_sequences(sentences)
sentences_encoded = pad_sequences(sequences, maxlen=sequence_length)
return sentences_encoded
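# Example (illustrative): with Xtrain = ["the cat sat", "the dog ran"], the
# tokenizer assigns ids {"the": 1, "cat": 2, "sat": 3, "dog": 4, "ran": 5}
# (ties kept in order of appearance), so encode_sentences(Xtrain, ["the cat"])
# pads [1, 2] on the left with zeros to a length-26 vector.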
def load_output_layers(path_to_dir):
layers = []
loaded = np.load(path_to_dir+'output_layers.npz')
layers.append(pd.DataFrame(loaded['a']))
layers.append(pd.DataFrame(loaded['b']))
layers.append(pd.DataFrame(loaded['c']))
try: layers.append(pd.DataFrame(loaded['d']))
except: pass
try: layers.append(pd.DataFrame(loaded['e']))
except: pass
try: layers.append(pd.DataFrame(loaded['f']))
except: pass
    try: layers.append(pd.DataFrame(loaded['g']))
    except: pass
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect to internal
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
index = DatetimeIndex(times)
tz = "US/Eastern"
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz, nonexistent="raise")
result = index.tz_localize(tz=tz, nonexistent="NaT")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="infer")
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer"))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(
datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz
)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(
datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc
)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + "US/Eastern"
dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
dti2 = dti.tz_localize(tzstr)
dti_utc = pd.date_range(
start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc"
)
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(prefix + "US/Pacific")
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dti.tz_localize(tzstr)
dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"):
dti.tz_localize(tzstr)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_localize_utc_conversion(self, tz):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range("3/10/2012", "3/11/2012", freq="30T")
converted = rng.tz_localize(tz)
expected_naive = rng + pd.offsets.Hour(5)
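        # US/Eastern is UTC-5 on 2012-03-10 (before the spring-forward), so the
        # localized UTC i8 values equal the naive values shifted forward by 5 hours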
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range("3/11/2012", "3/12/2012", freq="30T")
# Is this really how it should fail??
with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"):
rng.tz_localize(tz)
def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
        # note: this tests that a tz-naive index can be localized
# and de-localized successfully, when there are no DST transitions
# in the range.
idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T")
tz = tz_aware_fixture
localized = idx.tz_localize(tz)
        # can't localize a tz-aware object
with pytest.raises(
TypeError, match="Already tz-aware, use tz_convert to convert"
):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset.tzinfo is None
expected = idx._with_freq(None)
tm.assert_index_equal(reset, expected)
def test_dti_tz_localize_naive(self):
rng = date_range("1/1/2011", periods=100, freq="H")
conv = rng.tz_localize("US/Pacific")
exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific")
tm.assert_index_equal(conv, exp._with_freq(None))
def test_dti_tz_localize_tzlocal(self):
# GH#13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
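        # asi8 is the int64 nanosecond view of the index, so the local UTC
        # offset is converted to nanoseconds before the comparisons below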
dti = date_range(start="2001-01-01", end="2001-03-01")
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_nat(self, tz):
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="NaT")
times = [
"11/06/2011 00:00",
np.NaN,
np.NaN,
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di_test = DatetimeIndex(times, tz="US/Eastern")
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_flags(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# Pass in flags to determine right dst transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
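        # one flag per timestamp: 1 selects the DST (first) occurrence of the
        # repeated 01:00 hour, 0 selects the standard-time occurrence after the fall back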
localized = di.tz_localize(tz, ambiguous=is_dst)
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool"))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where inferring the dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
msg = "Length of ambiguous bool-array must be the same size as vals"
with pytest.raises(Exception, match=msg):
di.tz_localize(tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# TODO: belongs outside tz_localize tests?
@pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"])
def test_dti_construction_ambiguous_endpoint(self, tz):
# construction with an ambiguous end-point
# GH#11626
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
date_range(
"2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H"
)
times = date_range(
"2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer"
)
assert times[0] == Timestamp("2013-10-26 23:00", tz=tz, freq="H")
assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H")
@pytest.mark.parametrize(
"tz, option, expected",
[
["US/Pacific", "shift_forward", "2019-03-10 03:00"],
["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"],
["US/Pacific", "shift_backward", "2019-03-10 01:00"],
["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"],
["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"],
],
)
def test_dti_construction_nonexistent_endpoint(self, tz, option, expected):
        # construction with a nonexistent end-point
with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"):
date_range(
"2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H"
)
times = date_range(
"2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option
)
assert times[-1] == Timestamp(expected, tz=tz, freq="H")
def test_dti_tz_localize_bdate_range(self):
dr = pd.bdate_range("1/1/2009", "1/1/2010")
dr_utc = pd.bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
@pytest.mark.parametrize(
"method, exp", [["NaT", pd.NaT], ["raise", None], ["foo", "invalid"]]
)
def test_dti_tz_localize_nonexistent(self, tz, method, exp):
# GH 8917
n = 60
dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
if method == "raise":
with pytest.raises(pytz.NonExistentTimeError, match="2015-03-29 02:00:00"):
dti.tz_localize(tz, nonexistent=method)
elif exp == "invalid":
msg = (
"The nonexistent argument must be one of "
"'raise', 'NaT', 'shift_forward', 'shift_backward' "
"or a timedelta object"
)
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=method)
else:
result = dti.tz_localize(tz, nonexistent=method)
expected = DatetimeIndex([exp] * n, tz=tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start_ts, tz, end_ts, shift",
[
["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:59:59.999999999",
"backward",
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 03:20:00",
timedelta(hours=1),
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:20:00",
timedelta(hours=-1),
],
["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:59:59.999999999",
"backward",
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 03:33:00",
timedelta(hours=1),
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:33:00",
timedelta(hours=-1),
],
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift(
self, start_ts, tz, end_ts, shift, tz_type
):
# GH 8917
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
dti = DatetimeIndex([Timestamp(start_ts)])
result = dti.tz_localize(tz, nonexistent=shift)
expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("offset", [-1, 1])
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
# GH 8917
tz = tz_type + "Europe/Warsaw"
dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")])
msg = "The provided timedelta will relocalize on a nonexistent time"
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
# -------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize_tz(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC")
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
@td.skip_if_windows
@pytest.mark.parametrize(
"timezone",
[
"US/Pacific",
"US/Eastern",
"UTC",
"Asia/Kolkata",
"Asia/Shanghai",
"Australia/Canberra",
],
)
def test_normalize_tz_local(self, timezone):
# GH#13459
with tm.set_timezone(timezone):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
expected = expected._with_freq(None)
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
# ------------------------------------------------------------
# DatetimeIndex.__new__
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_constructor_static_tzinfo(self, prefix):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST")
index.hour
index[0]
def test_dti_constructor_with_fixed_tz(self):
off = FixedOffset(420, "+07:00")
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
rng2 = date_range(start, periods=len(rng), tz=off)
tm.assert_index_equal(rng, rng2)
rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00")
assert (rng.values == rng3.values).all()
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_convert_datetime_list(self, tzstr):
dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo")
dr2 = DatetimeIndex(list(dr), name="foo", freq="D")
tm.assert_index_equal(dr, dr2)
def test_dti_construction_univalent(self):
rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern")
rng2 = DatetimeIndex(data=rng, tz="US/Eastern")
tm.assert_index_equal(rng, rng2)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_from_tzaware_datetime(self, tz):
d = [datetime(2012, 8, 19, tzinfo=tz)]
index = DatetimeIndex(d)
assert timezones.tz_compare(index.tz, tz)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_constructors(self, tzstr):
"""Test different DatetimeIndex constructions with timezone
Follow-up of GH#4229
"""
arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"]
idx1 = to_datetime(arr).tz_localize(tzstr)
idx2 = pd.date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr)
idx2 = idx2._with_freq(None) # the others all have freq=None
idx3 = DatetimeIndex(arr, tz=tzstr)
idx4 = DatetimeIndex(np.array(arr), tz=tzstr)
for other in [idx2, idx3, idx4]:
tm.assert_index_equal(idx1, other)
# -------------------------------------------------------------
# Unsorted
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_date_accessor(self, dtype):
# Regression test for GH#21230
expected = np.array([date(2018, 6, 4), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype)
result = index.date
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_time_accessor(self, dtype):
# Regression test for GH#21267
expected = np.array([time(10, 20, 30), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype)
result = index.time
tm.assert_numpy_array_equal(result, expected)
def test_timetz_accessor(self, tz_naive_fixture):
# GH21358
tz = timezones.maybe_get_tz(tz_naive_fixture)
expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz)
result = index.timetz
tm.assert_numpy_array_equal(result, expected)
def test_dti_drop_dont_lose_tz(self):
# GH#2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
assert ind.tz is not None
def test_dti_tz_conversion_freq(self, tz_naive_fixture):
# GH25241
t3 = DatetimeIndex(["2019-01-01 10:00"], freq="H")
assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq
t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="T")
assert t4.tz_convert(tz="UTC").freq == t4.freq
def test_drop_dst_boundary(self):
# see gh-18031
tz = "Europe/Brussels"
freq = "15min"
start = Timestamp("201710290100", tz=tz)
end = Timestamp("201710290300", tz=tz)
index = pd.date_range(start=start, end=end, freq=freq)
expected = DatetimeIndex(
[
"201710290115",
"201710290130",
"201710290145",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290300",
],
tz=tz,
freq=freq,
ambiguous=[
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
],
)
result = index.drop(index[0])
tm.assert_index_equal(result, expected)
def test_date_range_localize(self):
rng = date_range("3/11/2012 03:00", periods=15, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern")
rng3 = date_range("3/11/2012 03:00", periods=15, freq="H")
rng3 = rng3.tz_localize("US/Eastern")
tm.assert_index_equal(rng._with_freq(None), rng3)
# DST transition time
val = rng[0]
exp = Timestamp("3/11/2012 03:00", tz="US/Eastern")
assert val.hour == 3
assert exp.hour == 3
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range("3/11/2012 00:00", periods=2, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(
["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern", freq="H"
)
tm.assert_index_equal(rng, rng2)
exp = Timestamp("3/11/2012 00:00", tz="US/Eastern")
assert exp.hour == 0
assert rng[0] == exp
exp = Timestamp("3/11/2012 01:00", tz="US/Eastern")
assert exp.hour == 1
assert rng[1] == exp
rng = date_range("3/11/2012 00:00", periods=10, freq="H", tz="US/Eastern")
assert rng[2].hour == 3
def test_timestamp_equality_different_timezones(self):
utc_range = date_range("1/1/2000", periods=20, tz="UTC")
eastern_range = utc_range.tz_convert("US/Eastern")
berlin_range = utc_range.tz_convert("Europe/Berlin")
for a, b, c in zip(utc_range, eastern_range, berlin_range):
assert a == b
assert b == c
assert a == c
assert (utc_range == eastern_range).all()
assert (utc_range == berlin_range).all()
assert (berlin_range == eastern_range).all()
def test_dti_intersection(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
left = rng[10:90][::-1]
right = rng[20:80][::-1]
assert left.tz == rng.tz
result = left.intersection(right)
assert result.tz == left.tz
def test_dti_equals_with_tz(self):
left = date_range("1/1/2011", periods=100, freq="H", tz="utc")
right = date_range("1/1/2011", periods=100, freq="H", tz="US/Eastern")
assert not left.equals(right)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_nat(self, tzstr):
idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT])
assert isna(idx[1])
assert idx[0].tzinfo is not None
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_astype_asobject_tzinfos(self, tzstr):
# GH#1345
# dates around a dst transition
rng = date_range("2/13/2010", "5/6/2010", tz=tzstr)
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_with_timezone_repr(self, tzstr):
rng = date_range("4/13/2010", "5/6/2010")
rng_eastern = rng.tz_localize(tzstr)
rng_repr = repr(rng_eastern)
assert "2010-04-13 00:00:00" in rng_repr
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_take_dont_lose_meta(self, tzstr):
rng = date_range("1/1/2000", periods=20, tz=tzstr)
result = rng.take(range(5))
assert result.tz == rng.tz
assert result.freq == rng.freq
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_utc_box_timestamp_and_localize(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tzstr)
expected = rng[-1].astimezone(tz)
stamp = rng_eastern[-1]
assert stamp == expected
assert stamp.tzinfo == expected.tzinfo
# right tzinfo
rng = date_range("3/13/2012", "3/14/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tzstr)
# test not valid for dateutil timezones.
# assert 'EDT' in repr(rng_eastern[0].tzinfo)
assert "EDT" in repr(rng_eastern[0].tzinfo) or "tzfile" in repr(
rng_eastern[0].tzinfo
)
def test_dti_to_pydatetime(self):
dt = dateutil.parser.parse("2012-06-13T01:39:00Z")
dt = dt.replace(tzinfo=tzlocal())
arr = np.array([dt], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
rng = date_range("2012-11-03 03:00", "2012-11-05 03:00", tz=tzlocal())
arr = rng.to_pydatetime()
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
    def test_dti_to_pydatetime_fixedtz(self):
dates = np.array(
[
datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off),
]
)
dti = DatetimeIndex(dates)
result = dti.to_pydatetime()
tm.assert_numpy_array_equal(dates, result)
result = dti._mpl_repr()
tm.assert_numpy_array_equal(dates, result)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Central"), gettz("US/Central")])
def test_with_tz(self, tz):
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq= | pd.offsets.Hour() | pandas.offsets.Hour |
"""Project: Eskapade - A python-based package for data analysis.
Class: RecordVectorizer
Created: 2016/11/08
Description:
Algorithm to perform the vectorization of an input column
of an input dataframe.
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
from functools import reduce
import pandas as pd
from pandas import DataFrame
from eskapade import DataStore
from eskapade import Link
from eskapade import StatusCode
from eskapade import process_manager
class RecordVectorizer(Link):
"""Vectorize data-frame columns.
Perform vectorization of input column of an input dataframe. E.g. a
    column x with values 1, 2 is transformed into columns x_1 and x_2, with
values True or False assigned per record.
"""
def __init__(self, **kwargs):
"""Initialize link instance.
Store and do basic check on the attributes of link RecordVectorizer.
:param str read_key: key to read dataframe from the data store. Dataframe of records that is to be transformed.
:param list columns: list of columns that are to be vectorized
:param str store_key: store key of output dataFrame. Default is read_key + '_vectorized'. (optional)
:param dict column_compare_with: dict of unique items per column with which column values are compared.
If not given, this is derived automatically from the column. (optional)
:param type astype: store answer of comparison of column with value as certain type. Default is bool. (optional)
"""
Link.__init__(self, kwargs.pop('name', 'RecordVectorizer'))
# process and register all relevant kwargs. kwargs are added as attributes of the link.
# second arg is default value for an attribute. key is popped from kwargs.
self._process_kwargs(kwargs,
read_key='',
store_key=None,
columns=[],
column_compare_with={},
astype=bool)
# check residual kwargs. exit if any present
self.check_extra_kwargs(kwargs)
def initialize(self):
"""Initialize the link.
Initialize and (further) check the assigned attributes of
RecordVectorizer.
"""
self.check_arg_types(read_key=str)
self.check_arg_types(recurse=True, allow_none=True, columns=str)
self.check_arg_vals('read_key')
if self.store_key is None:
self.store_key = self.read_key + '_vectorized'
self.logger.info('Store key was empty, has been set to "{key}".', key=self.store_key)
return StatusCode.Success
def execute(self):
"""Execute the link.
        Perform vectorization of the configured input columns of the input dataframe.
        The resulting dataset is stored as a new dataset.
"""
ds = process_manager.service(DataStore)
        # basic checks on contents of the data frame
if self.read_key not in ds:
raise KeyError('key "{}" not in data store'.format(self.read_key))
df = ds[self.read_key]
if not isinstance(df, DataFrame):
raise TypeError('retrieved object not of type pandas DataFrame')
if len(df.index) == 0:
raise AssertionError('dataframe "{}" is empty'.format(self.read_key))
for c in self.columns:
if c not in df.columns:
raise AssertionError('column "{}" not present in input data frame'.format(c))
# checks of column_compare_with
if isinstance(self.column_compare_with, str) and len(self.column_compare_with):
if self.column_compare_with not in ds:
raise KeyError('column compare with "{}" not found in data store'.format(self.column_compare_with))
            self.column_compare_with = ds[self.column_compare_with]
if not isinstance(self.column_compare_with, dict):
raise RuntimeError('column compare dict not set correctly')
for c in self.columns:
if c not in self.column_compare_with:
self.column_compare_with[c] = df[c].unique()
elif not isinstance(self.column_compare_with[c], list):
raise TypeError('column "{}" needs to be compared with list of values'.format(c))
# do vectorization for all columns, then merge
dfs = [record_vectorizer(df, c, self.column_compare_with[c], self.astype) for c in self.columns]
df_final = reduce(lambda left, right: pd.merge(left, right, left_index=True, right_index=True), dfs)
ds[self.store_key] = df_final
return StatusCode.Success
def record_vectorizer(df, column_to_vectorize, column_compare_set, astype=bool):
"""Vectorize data-frame column.
    Compare the given column of the input dataframe against a set of
    reference values and return one indicator column per reference value.
    :param df: dataframe of the records to vectorize
    :param str column_to_vectorize: column in the dataframe to vectorize.
    :param list column_compare_set: list of values to compare the column with.
    :param type astype: type of the comparison result columns. Default is bool.
    :returns: dataframe of the vectorized records.
"""
df_new = | pd.DataFrame(index=df.index) | pandas.DataFrame |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Preprocess ieee-fraud-detection dataset.
(https://www.kaggle.com/c/ieee-fraud-detection).
Train shape:(590540,394),identity(144233,41)--isFraud 3.5%
Test shape:(506691,393),identity(141907,41)
############### TF Version: 1.13.1/Python Version: 3.7 ###############
"""
import os
import random
import warnings
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings('ignore')
# make all processes deterministic / fix the seeds of the random number generators
# os.environ is a mapping of the environment variables; PYTHONHASHSEED is one of them.
# Python normally uses a random seed to generate hash values for str/bytes/datetime objects;
# if this environment variable is set to a number, it is used as a fixed seed for those hash values.
def set_seed(seed=0):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
# reduce dataframe memory usage by downcasting numeric columns to the smallest safe dtype
def reduce_mem_usage(df, verbose=True):
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
reduction = 100*(start_mem-end_mem)/start_mem
if verbose:
print("Default Mem. {:.2f} Mb, Optimized Mem. {:.2f} Mb, Reduction {:.1f}%".
format(start_mem, end_mem, reduction))
return df
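# Usage sketch (illustrative): after loading the csv files below, e.g.
#   train_tran = reduce_mem_usage(train_tran)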
# [train+infer] encode categorical identity features (NaN values are left unencoded)
def minify_identity_df(df):
df['id_12'] = df['id_12'].map({'Found': 1, 'NotFound': 0})
df['id_15'] = df['id_15'].map({'New': 2, 'Found': 1, 'Unknown': 0})
df['id_16'] = df['id_16'].map({'Found': 1, 'NotFound': 0})
df['id_23'] = df['id_23'].map({'IP_PROXY:TRANSPARENT': 3, 'IP_PROXY:ANONYMOUS': 2, 'IP_PROXY:HIDDEN': 1})
df['id_27'] = df['id_27'].map({'Found': 1, 'NotFound': 0})
df['id_28'] = df['id_28'].map({'New': 2, 'Found': 1})
df['id_29'] = df['id_29'].map({'Found': 1, 'NotFound': 0})
df['id_35'] = df['id_35'].map({'T': 1, 'F': 0})
df['id_36'] = df['id_36'].map({'T': 1, 'F': 0})
df['id_37'] = df['id_37'].map({'T': 1, 'F': 0})
df['id_38'] = df['id_38'].map({'T': 1, 'F': 0})
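    # id_34 values look like '<label>:<number>'; keep only the numeric part after ':'.
    # NaN is temporarily filled with the sentinel ':3' and mapped back to NaN afterwards.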
df['id_34'] = df['id_34'].fillna(':3')
df['id_34'] = df['id_34'].apply(lambda x: x.split(':')[1]).astype(np.int8)
df['id_34'] = np.where(df['id_34'] == 3, np.nan, df['id_34'])
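    # id_33 is a screen resolution such as '1920x1080'; split it into width (id_33_0)
    # and height (id_33_1), using '0x0' as a temporary NaN placeholder that is restored below.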
df['id_33'] = df['id_33'].fillna('0x0')
df['id_33_0'] = df['id_33'].apply(lambda x: x.split('x')[0]).astype(int)
df['id_33_1'] = df['id_33'].apply(lambda x: x.split('x')[1]).astype(int)
df['id_33'] = np.where(df['id_33'] == '0x0', np.nan, df['id_33'])
    df['DeviceType'] = df['DeviceType'].map({'desktop': 1, 'mobile': 0})
return df
if __name__ == "__main__":
print("========== 1.Set random seed ...")
SEED = 42
set_seed(SEED)
LOCAL_TEST = False
print("========== 2.Load csv data ...")
dir_data_csv = os.getcwd() + "\\ieee-fraud-detection\\"
train_tran = pd.read_csv(dir_data_csv + "\\train_transaction.csv")
train_iden = pd.read_csv(dir_data_csv + "\\train_identity.csv")
infer_tran = pd.read_csv(dir_data_csv + "\\test_transaction.csv")
infer_iden = | pd.read_csv(dir_data_csv + "\\test_identity.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
        # nothing to iterate over, so the previously defined values should
        # remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
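        # as with re.sub, the callable receives a match object and returns the replacement text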
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
            'dave@google.com',
            'tdhock5@gmail.com',
            'maudelaperriere@gmail.com',
            'rob@gmail.com some text steve@gmail.com',
            'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
        # An Index input should give the same result as the default-index case
        # above; i.e. the index's name does not affect the result.
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
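        # extractall() returns one row per match; taking the cross-section at
        # match == 0 keeps only the first match per subject, which (for subjects
        # that match at all) is exactly what extract() returns.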
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
        # 0xFF13: ３ FULLWIDTH DIGIT THREE
        values = ['A', '3', u'¼', u'★', u'፸', u'３', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
        unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'３', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
        values = ['A', np.nan, u'¼', u'★', np.nan, u'３', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.split('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.split('[,_]')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
def test_rsplit(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.rsplit('__')
tm.assert_series_equal(result, exp)
result = values.str.rsplit('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.rsplit('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.rsplit('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.rsplit('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
tm.assert_series_equal(result, exp)
result = values.str.rsplit('_', expand=False)
tm.assert_series_equal(result, exp)
# regex split is not supported by rsplit
values = Series([u('a,b_c'), u('c_d,e'), NA, u('f,g,h')])
result = values.str.rsplit('[,_]')
exp = Series([[u('a,b_c')], [u('c_d,e')], NA, [u('f,g,h')]])
tm.assert_series_equal(result, exp)
# setting max number of splits, make sure it's from reverse
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.rsplit('_', n=1)
exp = Series([['a_b', 'c'], ['c_d', 'e'], NA, ['f_g', 'h']])
tm.assert_series_equal(result, exp)
def test_split_noargs(self):
# #1859
s = Series(['<NAME>', '<NAME>'])
result = s.str.split()
expected = ['Travis', 'Oliphant']
assert result[1] == expected
result = s.str.rsplit()
assert result[1] == expected
def test_split_maxsplit(self):
# re.split 0, str.split -1
s = Series(['bd asdf jfg', 'kjasdflqw asdfnfk'])
result = s.str.split(n=-1)
xp = s.str.split()
tm.assert_series_equal(result, xp)
result = s.str.split(n=0)
tm.assert_series_equal(result, xp)
xp = s.str.split('asdf')
result = s.str.split('asdf', n=0)
tm.assert_series_equal(result, xp)
result = s.str.split('asdf', n=-1)
tm.assert_series_equal(result, xp)
def test_split_no_pat_with_nonzero_n(self):
s = Series(['split once', 'split once too!'])
result = s.str.split(n=1)
expected = Series({0: ['split', 'once'], 1: ['split', 'once too!']})
tm.assert_series_equal(expected, result, check_index_type=False)
def test_split_to_dataframe(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_unequal_splits', 'one_of_these_things_is_not'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'one'],
1: ['unequal', 'of'],
2: ['splits', 'these'],
3: [NA, 'things'],
4: [NA, 'is'],
5: [NA, 'not']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.split('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
with tm.assert_raises_regex(ValueError, "expand must be"):
s.str.split('_', expand="not_a_boolean")
def test_split_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.split('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_unequal_splits', 'one_of_these_things_is_not'])
result = idx.str.split('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'unequal', 'splits', NA, NA, NA
), ('one', 'of', 'these', 'things',
'is', 'not')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 6
with tm.assert_raises_regex(ValueError, "expand must be"):
idx.str.split('_', expand="not_a_boolean")
def test_rsplit_to_dataframe_expand(self):
s = Series(['nosplit', 'alsonosplit'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: Series(['nosplit', 'alsonosplit'])})
tm.assert_frame_equal(result, exp)
s = Series(['some_equal_splits', 'with_no_nans'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=2)
exp = DataFrame({0: ['some', 'with'],
1: ['equal', 'no'],
2: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
result = s.str.rsplit('_', expand=True, n=1)
exp = DataFrame({0: ['some_equal', 'with_no'], 1: ['splits', 'nans']})
tm.assert_frame_equal(result, exp)
s = Series(['some_splits', 'with_index'], index=['preserve', 'me'])
result = s.str.rsplit('_', expand=True)
exp = DataFrame({0: ['some', 'with'], 1: ['splits', 'index']},
index=['preserve', 'me'])
tm.assert_frame_equal(result, exp)
def test_rsplit_to_multiindex_expand(self):
idx = Index(['nosplit', 'alsonosplit'])
result = idx.str.rsplit('_', expand=True)
exp = idx
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True)
exp = MultiIndex.from_tuples([('some', 'equal', 'splits'), (
'with', 'no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 3
idx = Index(['some_equal_splits', 'with_no_nans'])
result = idx.str.rsplit('_', expand=True, n=1)
exp = MultiIndex.from_tuples([('some_equal', 'splits'),
('with_no', 'nans')])
tm.assert_index_equal(result, exp)
assert result.nlevels == 2
def test_split_nan_expand(self):
# gh-18450
s = Series(["foo,bar,baz", NA])
result = s.str.split(",", expand=True)
exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]])
tm.assert_frame_equal(result, exp)
# check that these are actually np.nan and not None
# TODO see GH 18463
# tm.assert_frame_equal does not differentiate
assert all(np.isnan(x) for x in result.iloc[1])
def test_split_with_name(self):
# GH 12617
# should preserve name
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.split(',')
exp = Series([['a', 'b'], ['c', 'd']], name='xxx')
tm.assert_series_equal(res, exp)
res = s.str.split(',', expand=True)
exp = DataFrame([['a', 'b'], ['c', 'd']])
tm.assert_frame_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.split(',')
exp = Index([['a', 'b'], ['c', 'd']], name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
res = idx.str.split(',', expand=True)
exp = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')])
assert res.nlevels == 2
tm.assert_index_equal(res, exp)
def test_partition_series(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([('a', '_', 'b_c'), ('c', '_', 'd_e'), NA,
('f', '_', 'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('a_b', '_', 'c'), ('c_d', '_', 'e'), NA,
('f_g', '_', 'h')])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.partition('__', expand=False)
exp = Series([('a', '__', 'b__c'), ('c', '__', 'd__e'), NA,
('f', '__', 'g__h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('__', expand=False)
exp = Series([('a__b', '__', 'c'), ('c__d', '__', 'e'), NA,
('f__g', '__', 'h')])
tm.assert_series_equal(result, exp)
# None
values = Series(['a b c', 'c d e', NA, 'f g h'])
result = values.str.partition(expand=False)
exp = Series([('a', ' ', 'b c'), ('c', ' ', 'd e'), NA,
('f', ' ', 'g h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition(expand=False)
exp = Series([('a b', ' ', 'c'), ('c d', ' ', 'e'), NA,
('f g', ' ', 'h')])
tm.assert_series_equal(result, exp)
        # Not split (separator not found)
values = Series(['abc', 'cde', NA, 'fgh'])
result = values.str.partition('_', expand=False)
exp = Series([('abc', '', ''), ('cde', '', ''), NA, ('fgh', '', '')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([('', '', 'abc'), ('', '', 'cde'), NA, ('', '', 'fgh')])
tm.assert_series_equal(result, exp)
# unicode
values = Series([u'a_b_c', u'c_d_e', NA, u'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Series([(u'a', u'_', u'b_c'), (u'c', u'_', u'd_e'),
NA, (u'f', u'_', u'g_h')])
tm.assert_series_equal(result, exp)
result = values.str.rpartition('_', expand=False)
exp = Series([(u'a_b', u'_', u'c'), (u'c_d', u'_', u'e'),
NA, (u'f_g', u'_', u'h')])
tm.assert_series_equal(result, exp)
# compare to standard lib
values = Series(['A_B_C', 'B_C_D', 'E_F_G', 'EFGHEF'])
result = values.str.partition('_', expand=False).tolist()
assert result == [v.partition('_') for v in values]
result = values.str.rpartition('_', expand=False).tolist()
assert result == [v.rpartition('_') for v in values]
def test_partition_index(self):
values = Index(['a_b_c', 'c_d_e', 'f_g_h'])
result = values.str.partition('_', expand=False)
exp = Index(np.array([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_',
'g_h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.rpartition('_', expand=False)
exp = Index(np.array([('a_b', '_', 'c'), ('c_d', '_', 'e'), (
'f_g', '_', 'h')]))
tm.assert_index_equal(result, exp)
assert result.nlevels == 1
result = values.str.partition('_')
exp = Index([('a', '_', 'b_c'), ('c', '_', 'd_e'), ('f', '_', 'g_h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
result = values.str.rpartition('_')
exp = Index([('a_b', '_', 'c'), ('c_d', '_', 'e'), ('f_g', '_', 'h')])
tm.assert_index_equal(result, exp)
assert isinstance(result, MultiIndex)
assert result.nlevels == 3
def test_partition_to_dataframe(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_')
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_')
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.partition('_', expand=True)
exp = DataFrame({0: ['a', 'c', np.nan, 'f'],
1: ['_', '_', np.nan, '_'],
2: ['b_c', 'd_e', np.nan, 'g_h']})
tm.assert_frame_equal(result, exp)
result = values.str.rpartition('_', expand=True)
exp = DataFrame({0: ['a_b', 'c_d', np.nan, 'f_g'],
1: ['_', '_', np.nan, '_'],
2: ['c', 'e', np.nan, 'h']})
tm.assert_frame_equal(result, exp)
def test_partition_with_name(self):
# GH 12617
s = Series(['a,b', 'c,d'], name='xxx')
res = s.str.partition(',')
exp = DataFrame({0: ['a', 'c'], 1: [',', ','], 2: ['b', 'd']})
tm.assert_frame_equal(res, exp)
# should preserve name
res = s.str.partition(',', expand=False)
exp = Series([('a', ',', 'b'), ('c', ',', 'd')], name='xxx')
tm.assert_series_equal(res, exp)
idx = Index(['a,b', 'c,d'], name='xxx')
res = idx.str.partition(',')
exp = MultiIndex.from_tuples([('a', ',', 'b'), ('c', ',', 'd')])
assert res.nlevels == 3
tm.assert_index_equal(res, exp)
# should preserve name
res = idx.str.partition(',', expand=False)
exp = Index(np.array([('a', ',', 'b'), ('c', ',', 'd')]), name='xxx')
assert res.nlevels == 1
tm.assert_index_equal(res, exp)
def test_pipe_failures(self):
# #2119
s = Series(['A|B|C'])
result = s.str.split('|')
exp = Series([['A', 'B', 'C']])
tm.assert_series_equal(result, exp)
result = s.str.replace('|', ' ')
exp = Series(['A B C'])
tm.assert_series_equal(result, exp)
def test_slice(self):
values = Series(['aafootwo', 'aabartwo', NA, 'aabazqux'])
result = values.str.slice(2, 5)
exp = Series(['foo', 'bar', NA, 'baz'])
tm.assert_series_equal(result, exp)
for start, stop, step in [(0, 3, -1), (None, None, -1), (3, 10, 2),
(3, 0, -1)]:
try:
result = values.str.slice(start, stop, step)
expected = Series([s[start:stop:step] if not isna(s) else NA
for s in values])
tm.assert_series_equal(result, expected)
except:
print('failed on %s:%s:%s' % (start, stop, step))
raise
# mixed
mixed = Series(['aafootwo', NA, 'aabartwo', True, datetime.today(),
None, 1, 2.])
rs = Series(mixed).str.slice(2, 5)
xp = Series(['foo', NA, 'bar', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.slice(2, 5, -1)
xp = Series(['oof', NA, 'rab', NA, NA, NA, NA, NA])
# unicode
values = Series([u('aafootwo'), u('aabartwo'), NA, u('aabazqux')])
result = values.str.slice(2, 5)
exp = Series([u('foo'), u('bar'), NA, u('baz')])
tm.assert_series_equal(result, exp)
result = values.str.slice(0, -1, 2)
exp = Series([u('afow'), u('abrw'), NA, u('abzu')])
tm.assert_series_equal(result, exp)
def test_slice_replace(self):
values = Series(['short', 'a bit longer', 'evenlongerthanthat', '', NA
])
exp = Series(['shrt', 'a it longer', 'evnlongerthanthat', '', NA])
result = values.str.slice_replace(2, 3)
tm.assert_series_equal(result, exp)
exp = Series(['shzrt', 'a zit longer', 'evznlongerthanthat', 'z', NA])
result = values.str.slice_replace(2, 3, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shzort', 'a zbit longer', 'evzenlongerthanthat', 'z', NA
])
result = values.str.slice_replace(2, 1, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shorz', 'a bit longez', 'evenlongerthanthaz', 'z', NA])
result = values.str.slice_replace(-1, None, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'zer', 'zat', 'z', NA])
result = values.str.slice_replace(None, -2, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['shortz', 'a bit znger', 'evenlozerthanthat', 'z', NA])
result = values.str.slice_replace(6, 8, 'z')
tm.assert_series_equal(result, exp)
exp = Series(['zrt', 'a zit longer', 'evenlongzerthanthat', 'z', NA])
result = values.str.slice_replace(-10, 3, 'z')
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip(self):
values = Series([' aa ', ' bb \n', NA, 'cc '])
result = values.str.strip()
exp = Series(['aa', 'bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series(['aa ', 'bb \n', NA, 'cc '])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([' aa', ' bb', NA, 'cc'])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_mixed(self):
# mixed
mixed = Series([' aa ', NA, ' bb \t\n', True, datetime.today(), None,
1, 2.])
rs = Series(mixed).str.strip()
xp = Series(['aa', NA, 'bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.lstrip()
xp = Series(['aa ', NA, 'bb \t\n', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rstrip()
xp = Series([' aa', NA, ' bb', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
def test_strip_lstrip_rstrip_unicode(self):
# unicode
values = Series([u(' aa '), u(' bb \n'), NA, u('cc ')])
result = values.str.strip()
exp = Series([u('aa'), u('bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
result = values.str.lstrip()
exp = Series([u('aa '), u('bb \n'), NA, u('cc ')])
tm.assert_series_equal(result, exp)
result = values.str.rstrip()
exp = Series([u(' aa'), u(' bb'), NA, u('cc')])
tm.assert_series_equal(result, exp)
def test_strip_lstrip_rstrip_args(self):
values = Series(['xxABCxx', 'xx BNSD', 'LDFJH xx'])
rs = values.str.strip('x')
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip('x')
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip('x')
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_strip_lstrip_rstrip_args_unicode(self):
values = Series([u('xxABCxx'), u('xx BNSD'), u('LDFJH xx')])
rs = values.str.strip(u('x'))
xp = Series(['ABC', ' BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
rs = values.str.lstrip(u('x'))
xp = Series(['ABCxx', ' BNSD', 'LDFJH xx'])
assert_series_equal(rs, xp)
rs = values.str.rstrip(u('x'))
xp = Series(['xxABC', 'xx BNSD', 'LDFJH '])
assert_series_equal(rs, xp)
def test_wrap(self):
# test values are: two words less than width, two words equal to width,
# two words greater than width, one word less than width, one word
# equal to width, one word greater than width, multiple tokens with
# trailing whitespace equal to width
values = Series([u('hello world'), u('hello world!'), u(
'hello world!!'), u('abcdefabcde'), u('abcdefabcdef'), u(
'abcdefabcdefa'), u('ab ab ab ab '), u('ab ab ab ab a'), u(
'\t')])
# expected values
xp = Series([u('hello world'), u('hello world!'), u('hello\nworld!!'),
u('abcdefabcde'), u('abcdefabcdef'), u('abcdefabcdef\na'),
u('ab ab ab ab'), u('ab ab ab ab\na'), u('')])
rs = values.str.wrap(12, break_long_words=True)
assert_series_equal(rs, xp)
# test with pre and post whitespace (non-unicode), NaN, and non-ascii
# Unicode
values = Series([' pre ', np.nan, u('\xac\u20ac\U00008000 abadcafe')
])
xp = Series([' pre', NA, u('\xac\u20ac\U00008000 ab\nadcafe')])
rs = values.str.wrap(6)
assert_series_equal(rs, xp)
def test_get(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.get(1)
expected = Series(['b', 'd', np.nan, 'g'])
tm.assert_series_equal(result, expected)
# mixed
mixed = Series(['a_b_c', NA, 'c_d_e', True, datetime.today(), None, 1,
2.])
rs = Series(mixed).str.split('_').str.get(1)
xp = Series(['b', NA, 'd', NA, NA, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.get(1)
expected = Series([u('b'), u('d'), np.nan, u('g')])
tm.assert_series_equal(result, expected)
# bounds testing
values = Series(['1_2_3_4_5', '6_7_8_9_10', '11_12'])
# positive index
result = values.str.split('_').str.get(2)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
# negative index
result = values.str.split('_').str.get(-3)
expected = Series(['3', '8', np.nan])
tm.assert_series_equal(result, expected)
def test_more_contains(self):
# PR #1179
s = Series(['A', 'B', 'C', 'Aaba', 'Baca', '', NA,
'CABA', 'dog', 'cat'])
result = s.str.contains('a')
expected = Series([False, False, False, True, True, False, np.nan,
False, False, True])
assert_series_equal(result, expected)
result = s.str.contains('a', case=False)
expected = Series([True, False, False, True, True, False, np.nan, True,
False, True])
assert_series_equal(result, expected)
result = s.str.contains('Aa')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba')
expected = Series([False, False, False, True, False, False, np.nan,
False, False, False])
assert_series_equal(result, expected)
result = s.str.contains('ba', case=False)
expected = Series([False, False, False, True, True, False, np.nan,
True, False, False])
assert_series_equal(result, expected)
def test_contains_nan(self):
# PR #14171
s = Series([np.nan, np.nan, np.nan], dtype=np.object_)
result = s.str.contains('foo', na=False)
expected = Series([False, False, False], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na=True)
expected = Series([True, True, True], dtype=np.bool_)
assert_series_equal(result, expected)
result = s.str.contains('foo', na="foo")
expected = Series(["foo", "foo", "foo"], dtype=np.object_)
assert_series_equal(result, expected)
result = s.str.contains('foo')
expected = Series([np.nan, np.nan, np.nan], dtype=np.object_)
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import os
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from os.path import join as pjoin
from cplvm import CPLVM
import matplotlib
font = {"size": 30}
matplotlib.rc("font", **font)
matplotlib.rcParams["text.usetex"] = True
DATA_DIR = "../../data/mix_seq/data/nutlin/"
if __name__ == "__main__":
latent_dim_shared = 2
latent_dim_foreground = 2
X_fname = pjoin(DATA_DIR, "dmso_expt1.csv")
Y_fname = pjoin(DATA_DIR, "nutlin_expt1.csv")
X_mutation_fname = pjoin(DATA_DIR, "p53_mutations_dmso.csv")
Y_mutation_fname = pjoin(DATA_DIR, "p53_mutations_nutlin.csv")
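    # Collapse the TP53 annotation to two groups for plotting: "Hotspot" mutations
    # become "Mutated" and "Other" becomes "Wild-type"; cells marked "NotAvailable"
    # keep their label and are filtered out before plotting.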
    p53_mutations_X = pd.read_csv(X_mutation_fname, index_col=0)
    p53_mutations_X.loc[
        p53_mutations_X.tp53_mutation == "Hotspot", "tp53_mutation"
    ] = "Mutated"
    p53_mutations_X.loc[
        p53_mutations_X.tp53_mutation == "Other", "tp53_mutation"
    ] = "Wild-type"
    p53_mutations_Y = pd.read_csv(Y_mutation_fname, index_col=0)
    p53_mutations_Y.loc[
        p53_mutations_Y.tp53_mutation == "Hotspot", "tp53_mutation"
    ] = "Mutated"
    p53_mutations_Y.loc[
        p53_mutations_Y.tp53_mutation == "Other", "tp53_mutation"
    ] = "Wild-type"
# Read in data
X = pd.read_csv(X_fname, index_col=0)
Y = pd.read_csv(Y_fname, index_col=0)
plt.figure(figsize=(21, 7))
mutations_X = p53_mutations_X
mutations_Y = p53_mutations_Y
idx_to_plot_Y = np.where(mutations_Y.tp53_mutation.values != "NotAvailable")[0]
###### PCA ######
# Run PCA
pca_reduced = PCA(n_components=2).fit_transform(np.concatenate([X, Y], axis=0))
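    # Added sketch (not part of the original script): the silhouette_score imported
    # above can quantify how well the 2-D PCA embedding separates TP53-mutated from
    # wild-type cells. This assumes the mutation tables have one row per cell, in the
    # same order as the rows of X and Y.
    pca_labels = np.concatenate(
        [mutations_X.tp53_mutation.values, mutations_Y.tp53_mutation.values]
    )
    labeled = pca_labels != "NotAvailable"
    print(
        "PCA silhouette score (p53 status):",
        silhouette_score(pca_reduced[labeled], pca_labels[labeled]),
    )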
pca_reduced_df = | pd.DataFrame(pca_reduced) | pandas.DataFrame |
"""Manages Training Data for the Musical MDN and can generate fake datsets for testing."""
import numpy as np
import pandas as pd
import random
def batch_generator(seq_len, batch_size, dim, corpus):
"""Returns a generator to cut up datasets into
batches of features and labels."""
# generator = batch_generator(SEQ_LEN, BATCH_SIZE, 3, corpus)
batch_X = np.zeros((batch_size, seq_len, dim))
batch_y = np.zeros((batch_size, dim))
while True:
for i in range(batch_size):
# choose random example
l = random.choice(corpus)
last_index = len(l) - seq_len - 1
start_index = np.random.randint(0, high=last_index)
batch_X[i] = l[start_index:start_index+seq_len]
            # label is the step immediately after the input window (shape (dim,))
            batch_y[i] = l[start_index + seq_len]
yield batch_X, batch_y
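# Usage sketch (illustrative only, not in the original module): with a 2-D corpus
# such as [generate_data()], one batch can be drawn via
#
#     gen = batch_generator(seq_len=30, batch_size=4, dim=2, corpus=[generate_data()])
#     batch_X, batch_y = next(gen)   # shapes (4, 30, 2) and (4, 2)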
def generate_data():
"""Generating some Slightly fuzzy sine wave data."""
NSAMPLE = 50000
print("Generating", str(NSAMPLE), "toy data samples.")
t_data = np.float32(np.array(range(NSAMPLE)) / 10.0)
t_interval = t_data[1] - t_data[0]
t_r_data = np.random.normal(0, t_interval / 20.0, size=NSAMPLE)
t_data = t_data + t_r_data
r_data = np.random.normal(size=NSAMPLE)
x_data = np.sin(t_data) * 1.0 + (r_data * 0.05)
df = pd.DataFrame({'t': t_data, 'x': x_data})
df.t = df.t.diff()
df.t = df.t.fillna(1e-4)
print(df.describe())
return np.array(df)
def generate_synthetic_3D_data():
"""
Generates some slightly fuzzy sine wave data
in two dimensions (plus time).
"""
NSAMPLE = 50000
print("Generating", str(NSAMPLE), "toy data samples.")
t_data = np.float32(np.array(range(NSAMPLE)) / 10.0)
t_interval = t_data[1] - t_data[0]
t_r_data = np.random.normal(0, t_interval / 20.0, size=NSAMPLE)
t_data = t_data + t_r_data
r_data = np.random.normal(size=NSAMPLE)
x_data = (np.sin(t_data) + (r_data / 10.0) + 1) / 2.0
y_data = (np.sin(t_data * 3.0) + (r_data / 10.0) + 1) / 2.0
df = | pd.DataFrame({'a': x_data, 'b': y_data, 't': t_data}) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import sys
module_path = os.path.abspath(os.path.join("./src/"))
if module_path not in sys.path:
sys.path.append(module_path)
import numpy as np
import pandas as pd
import torch
from model.autoencoders import SHAENet, SHAE
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from utils.utils import FixRandomSeed, shae_criterion
def main():
PERMUTATION_REPS = 10
with open("config/config.json") as f:
config = json.load(f)
for cancer in config["cancers"]:
print(f"Starting: {cancer}")
data = pd.read_csv(
f"./data/processed/{cancer}/merged/{config['data_name_tcga']}"
)
X = data[data.columns[2:]]
X = X.loc[:, (X != X.iloc[0]).any()]
y_str = data["OS"].astype(str) + "|" + data["OS.time"].astype(str)
train_splits = pd.read_csv(
f"./data/splits/{cancer}/{config['train_split_name_tcga']}"
)
test_splits = pd.read_csv(
f"./data/splits/{cancer}/{config['test_split_name_tcga']}"
)
clinical_indices = [
i for i in range(len(X.columns)) if "clinical" in X.columns[i]
]
gex_indices = [
i for i in range(len(X.columns)) if "gex" in X.columns[i]
]
cnv_indices = [
i for i in range(len(X.columns)) if "cnv" in X.columns[i]
]
meth_indices = [
i for i in range(len(X.columns)) if "meth" in X.columns[i]
]
mirna_indices = [
i for i in range(len(X.columns)) if "mirna" in X.columns[i]
]
mut_indices = [
i for i in range(len(X.columns)) if "mut" in X.columns[i]
]
rppa_indices = [
i for i in range(len(X.columns)) if "rppa" in X.columns[i]
]
blocks = [
clinical_indices,
gex_indices,
cnv_indices,
meth_indices,
mirna_indices,
mut_indices,
rppa_indices,
]
# Make sure that all variables are considered in the blocks
assert sum([len(i) for i in blocks]) == X.shape[1]
model = "shaenet"
params = pd.read_csv(
f"./data/benchmarks/{cancer}/{model}_tuned_parameters_timed_euler.csv"
)
scores = pd.read_csv(
f"./data/benchmarks/{cancer}/{model}_tuned_scores_timed_euler.csv"
)
mapping = {
np.argmax(scores["concordance"]): "best_shae",
}
for i in list(mapping.keys()):
test_scores = []
train_scores = []
print(f"Split: {i+1}/10")
train_ix = train_splits.iloc[i, :].dropna().values
test_ix = test_splits.iloc[i, :].dropna().values
net = SHAENet(
module=SHAE,
criterion=shae_criterion,
max_epochs=config["epochs"],
lr=config["lr"],
train_split=None,
optimizer=torch.optim.Adam,
callbacks=[
("seed", FixRandomSeed(config["seed"])),
],
verbose=0,
batch_size=-1,
module__blocks=blocks,
)
pipe = make_pipeline(StandardScaler(), net)
pipe.set_params(**{key: val[i] for key, val in params.items()})
pipe.fit(
X.iloc[train_ix, :].to_numpy().astype(np.float32),
y_str.iloc[train_ix].to_numpy().astype(str),
)
test_scores.append(
pipe.score(
X.iloc[test_ix, :].to_numpy().astype(np.float32),
y_str.iloc[test_ix].to_numpy().astype(str),
)
)
train_scores.append(
pipe.score(
X.iloc[train_ix, :].to_numpy().astype(np.float32),
y_str.iloc[train_ix].to_numpy().astype(str),
)
)
train_feature_importances = {i: [] for i in range(len(blocks))}
test_feature_importances = {i: [] for i in range(len(blocks))}
rstate = np.random.RandomState(config["seed"])
for q in range(PERMUTATION_REPS):
print(f"Permutation: {q+1} / 10")
permuted_ix_train = np.arange(train_ix.shape[0])
permuted_ix_test = np.arange(test_ix.shape[0])
for block in range(len(blocks)):
rstate.shuffle(permuted_ix_train)
rstate.shuffle(permuted_ix_test)
X_train_permuted = (
X.iloc[train_ix, :]
.copy(deep=True)
.reset_index(drop=True)
)
X_train_permuted.iloc[
:, blocks[block]
] = X_train_permuted.iloc[
permuted_ix_train, blocks[block]
].reset_index(
drop=True
)
X_test_permuted = (
X.iloc[test_ix, :]
.copy(deep=True)
.reset_index(drop=True)
)
X_test_permuted.iloc[
:, blocks[block]
] = X_test_permuted.iloc[
permuted_ix_test, blocks[block]
].reset_index(
drop=True
)
train_feature_importance = (
1
- pipe.score(
X_train_permuted.to_numpy().astype(np.float32),
y_str[train_ix].to_numpy().astype(str),
)
) / (1 - train_scores[-1])
test_feature_importance = (
1
- pipe.score(
X_test_permuted.to_numpy().astype(np.float32),
y_str[test_ix].to_numpy().astype(str),
)
) / (1 - test_scores[-1])
train_feature_importances[block].append(
train_feature_importance
)
test_feature_importances[block].append(
test_feature_importance
)
importance_frame_train = pd.DataFrame(train_feature_importances)
            importance_frame_test = pd.DataFrame(test_feature_importances)
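            # Editor's sketch (not in the original script): the excerpt ends after the
            # importance frames are built; one plausible way to persist them. The file
            # names below are assumptions, not taken from the original repository.
            importance_frame_train.to_csv(
                f"./data/benchmarks/{cancer}/{model}_train_block_importances.csv",
                index=False,
            )
            importance_frame_test.to_csv(
                f"./data/benchmarks/{cancer}/{model}_test_block_importances.csv",
                index=False,
            )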
from Modules.appLogger import application_logger
from Modules.DataLoader import predictionDataLoader
from Modules.SaveLoadModel import saveLoadModel
from Modules.DataPreprocessor import dataPreprocessor
import pandas as pd
class predictData:
"""
Class Name: predictData
Description: Predicts the rating of a restaurant based on the inputs.
Input: None
Output: CSV file containing the ratings of the restaurants given in the input file.
On Failure: Raise Exception
Written By: <NAME>
Version: 1.0
Revisions: None
"""
def __init__(self):
try:
            self.prediction_logs = pd.read_csv('Logs\\Prediction Logs\\prediction_logs.csv')
import numpy as np
import csv
import re
import pandas as pd
from sklearn import decomposition
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import Pipeline
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
import pickle as pk
from pandas import DataFrame
from random import sample
from nltk.corpus import stopwords
from nltk.stem.porter import *
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
from collections import namedtuple
from gensim.models.doc2vec import Doc2Vec
from sklearn.metrics import classification_report
from sklearn.cluster import KMeans
from scipy import stats
from sklearn.metrics import roc_curve, auc
def loaddata(filename,instancecol):
file_reader = csv.reader(open(filename,'r'),delimiter=',')
x = []
y = []
for row in file_reader:
x.append(row[0:instancecol])
y.append(row[-1])
    return np.array(x[1:]).astype(np.float32), np.array(y[1:]).astype(int)
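# Editor's sketch (not in the original script): loaddata expects a comma-separated file
# whose first `instancecol` columns are numeric features and whose last column is an
# integer class label; the first (header) row is skipped. The file name and column
# count below are placeholders for illustration only.
def _demo_loaddata(path='features.csv', instancecol=310):
    x, y = loaddata(path, instancecol)
    print('features:', x.shape, 'labels:', y.shape, 'classes:', np.unique(y))
    return x, y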
def readdata(train_set_path, y_value):
x = []
y = []
stop_words = set(stopwords.words('english'))
with open(train_set_path, encoding="utf8") as infile:
for line in infile:
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
content = re.sub(r"(?:\@|https?\://)\S+", "", line)
toker = RegexpTokenizer(r'((?<=[^\w\s])\w(?=[^\w\s])|(\W))+', gaps=True)
word_tokens = toker.tokenize(content)
filtered_sentence = [lemmatizer.lemmatize(w) for w in word_tokens if not w in stop_words and w.isalpha()]
x.append(' '.join(filtered_sentence))
y.append(y_value)
x, y = np.array(x), np.array(y)
return x, y
def create_docmodel(x, y, feature_count):
docs = []
dfs = []
    features_vectors = pd.DataFrame()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale
from scipy.stats import pearsonr, spearmanr
from ripser import Rips
from dtw import dtw, accelerated_dtw
from datetime import timedelta, date
import pickle
import os
class dyads():
def __init__(self, dfi, dfp):
self.dfi = dfi
self.dfp = dfp
self.start_date = pd.to_datetime('1955-1-1', format='%Y-%m-%d')
self.end_date = pd.to_datetime('1978-12-31', format='%Y-%m-%d')
self.country_codes_i = {
'USA': ['USA'],
'USSR': ['USR'],
'China': ['CHN'],
'East-Germany': ['GME'],
'West-Germany': ['GMW'],
'Canada': ['CAD']
}
self.country_codes_p = {
'USA': ['USA'],
'USSR': ['SUN', 'RUS'],
'China': ['CHN'],
'East-Germany': ['DDR'],
'West-Germany': ['DEU'],
'Canada': ['CAN']
}
self.complete_dyads = None # track data with complete data in cor
pass
def filter_dates(self, dfi=None, dfp=None):
'''
filter by selected date range
'''
if dfi is None:
dfi_filt = self.dfi
dfp_filt = self.dfp
# convert to datetime
dfi_filt['date'] = pd.to_datetime(
dfi_filt['date'], format='%Y-%m-%d')
dfp_filt['date'] = pd.to_datetime(
dfp_filt['story_date'], format='%m/%d/%Y')
start = self.start_date
end = self.end_date
pass
else:
dfi_filt = dfi
dfp_filt = dfp
start = max(min(dfi_filt.date), min(dfp_filt.date))
end = min(max(dfi_filt.date), max(dfp_filt.date))
pass
# filter by start and end dates
dfi_filt = dfi_filt[(dfi_filt.date >= start) & (dfi_filt.date <= end)]
dfp_filt = dfp_filt[(dfp_filt.date >= start) & (dfp_filt.date <= end)]
return (dfi_filt, dfp_filt)
def initial_manupulations(self):
'''
rename and select columns
'''
self.dfp = self.dfp.rename(
columns={
'source_root': 'actor',
'target': 'something_else',
'target_root': 'target'
})
self.dfp = self.dfp[['date', 'actor', 'target', 'goldstein']]
self.dfi = self.dfi[['date', 'actor', 'target', 'scale']]
# implement azar weights
azar_weighting = [
92, 47, 31, 27, 14, 10, 6, 0, -6, -16, -29, -44, -50, -65, -102
]
self.dfp['score'] = self.dfp['goldstein']
self.dfi['score'] = [
azar_weighting[ind - 1] for ind in self.dfi['scale'].to_list()
]
# create time frame designations
self.dfp['year'] = pd.DatetimeIndex(self.dfp.date).to_period('Y')
self.dfi['year'] = pd.DatetimeIndex(self.dfi.date).to_period('Y')
self.dfp['month'] = pd.DatetimeIndex(self.dfp.date).to_period('M')
        self.dfi['month'] = pd.DatetimeIndex(self.dfi.date).to_period('M')
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# In[3]:
import pandas as pd
import MySQLdb, os, pyodbc
from datetime import *
from dateutil.relativedelta import *
import numpy as np
from fn import *
def PP(df):
try:
        print(df[['LASTOCCURRENCE', 'DUR', 'DURCAT']])
except:
try:
            print(df[['LASTOCCURRENCE', 'DUR']])
except:
print(df['LASTOCCURRENCE'])
def series2df(sr1, sr2):
df = pd.concat([sr1, sr2], axis=1)
return df
def DateDiff(df, newcol, col1, col2 = False, DayFirst = True):
if col2 == False:
lscol = df[col1].to_list()
ls = list(map (lambda x: ((datetime.now() - datetime.strptime(x, "%d/%m/%Y %H:%M")).total_seconds())/60, lscol))
df[newcol] = np.array(ls)
else:
lscol1 = df[col1].to_list()
lscol2 = df[col2].to_list()
ls = list(map (lambda x , y: ((datetime.strptime(x, "%d/%m/%Y %H:%M") - datetime.strptime(y, "%d/%m/%Y %H:%M")).total_seconds())/60 if ('1970' not in str(y)) else "0", lscol2,lscol1))
df[newcol] = np.array(ls)
df[newcol] = df[newcol].astype(float).round(2)
return df
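# Editor's sketch (not in the original module): minimal illustration of DateDiff on a
# two-row frame; the column names and timestamps are placeholders for illustration only.
def _demo_datediff():
    df = pd.DataFrame({
        'FIRSTOCCURRENCE': ['01/01/2021 10:00', '01/01/2021 11:30'],
        'LASTOCCURRENCE': ['01/01/2021 10:45', '01/01/2021 12:00'],
    })
    out = DateDiff(df, 'DUR', 'FIRSTOCCURRENCE', 'LASTOCCURRENCE')
    print(out[['FIRSTOCCURRENCE', 'LASTOCCURRENCE', 'DUR']])  # DUR is in minutes
    return out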
def xxz(df):
df['LASTOCCURRENCE'] = df['LASTOCCURRENCE'].apply(lambda x : pd.Timestamp(x))
return df
def Delta(df):
    df['LASTOCCURRENCE'] = df['LASTOCCURRENCE'].apply(lambda x : x - pd.to_timedelta(2))
import numpy as np
import pandas as pd
import time
import nltk
import string
import re
import os
import joblib
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import json
_debug = True
def debug(text):
if _debug:
print(text)
debug('Importing Parent Database..')
art_df = pd.read_csv('article-database.csv')
model_db = art_df.drop([art_df.columns[0], art_df.columns[2], art_df.columns[5]], axis=1)
model_db_clean = model_db.dropna(axis=0, how='any', thresh=None, subset=None, inplace=False).reset_index(drop=True)
# from ast import literal_eval
def func(raw_tags):
raw_split = raw_tags[1:-1].split(',')
num_tags = len(raw_split)
tags_clean = []
tags_clean.append(raw_split[0][1:-1])
for i in range(1, num_tags):
tags_clean.append(raw_split[i][2:-1])
return tags_clean
debug('Cleaning Parent Data..')
model_db_clean['Tags_clean'] = model_db_clean['Tags'].apply(lambda x: func(x))
multi_label_transform = MultiLabelBinarizer()
multi_label_transform.fit(model_db_clean['Tags_clean'])
y = multi_label_transform.transform(model_db_clean['Tags_clean'])
cols = []
for i in list(multi_label_transform.classes_):
cols.append(i)
cols.append('Text')
prepd_db = pd.DataFrame()
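# Editor's sketch (not in the original script): the imports above (TfidfVectorizer,
# train_test_split, LogisticRegression, f1_score) suggest a multi-label classifier is
# trained next; this is one plausible shape of that step, with all parameters assumed.
from sklearn.multiclass import OneVsRestClassifier  # extra import used only by the sketch
def _demo_tag_classifier(texts, label_matrix):
    vectorizer = TfidfVectorizer(max_features=10000, stop_words='english')
    features = vectorizer.fit_transform(texts)
    x_train, x_test, y_train, y_test = train_test_split(
        features, label_matrix, test_size=0.2, random_state=42)
    clf = OneVsRestClassifier(LogisticRegression(max_iter=1000))
    clf.fit(x_train, y_train)
    preds = clf.predict(x_test)
    debug('micro F1: {:.3f}'.format(f1_score(y_test, preds, average='micro')))
    debug('subset accuracy: {:.3f}'.format(accuracy_score(y_test, preds)))
    return clf, vectorizer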
import os
import unittest
import numpy as np
import pandas as pd
from cgnal.core.data.model.ml import (
LazyDataset,
IterGenerator,
MultiFeatureSample,
Sample,
PandasDataset,
PandasTimeIndexedDataset,
CachedDataset,
features_and_labels_to_dataset,
)
from typing import Iterator, Generator
from cgnal.core.tests.core import TestCase, logTest
from tests import TMP_FOLDER
samples = [
Sample(features=[100, 101], label=1),
Sample(features=[102, 103], label=2),
Sample(features=[104, 105], label=3),
Sample(features=[106, 107], label=4),
Sample(features=[108, 109], label=5),
Sample(features=[110, 111], label=6),
Sample(features=[112, 113], label=7),
Sample(features=[114, 115], label=8),
Sample(features=[116, 117], label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
lazyDat = LazyDataset(IterGenerator(samples_gen))
class features_and_labels_to_datasetTests(TestCase):
def test_features_and_labels_to_dataset(self):
dataset = features_and_labels_to_dataset(
pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
pd.Series([0, 0, 0, 1], name="Label"),
)
dataset_no_labels = features_and_labels_to_dataset(
pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
None,
)
self.assertTrue(isinstance(dataset_no_labels, CachedDataset))
self.assertTrue(isinstance(dataset, CachedDataset))
self.assertTrue(
(
dataset.getFeaturesAs("pandas")
== pd.concat(
[
pd.Series([1, 0, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
)
)
.all()
.all()
)
self.assertTrue(
(
dataset.getLabelsAs("pandas")
== pd.DataFrame(pd.Series([0, 0, 0, 1], name="Label"))
)
.all()
.all()
)
class LazyDatasetTests(TestCase):
@logTest
def test_withLookback_MultiFeatureSample(self):
samples = [
MultiFeatureSample(
features=[np.array([100.0, 101.0]), np.array([np.NaN])], label=1.0
),
MultiFeatureSample(
features=[np.array([102.0, 103.0]), np.array([1.0])], label=2.0
),
MultiFeatureSample(
features=[np.array([104.0, 105.0]), np.array([2.0])], label=3.0
),
MultiFeatureSample(
features=[np.array([106.0, 107.0]), np.array([3.0])], label=4.0
),
MultiFeatureSample(
features=[np.array([108.0, 109.0]), np.array([4.0])], label=5.0
),
MultiFeatureSample(
features=[np.array([110.0, 111.0]), np.array([5.0])], label=6.0
),
MultiFeatureSample(
features=[np.array([112.0, 113.0]), np.array([6.0])], label=7.0
),
MultiFeatureSample(
features=[np.array([114.0, 115.0]), np.array([7.0])], label=8.0
),
MultiFeatureSample(
features=[np.array([116.0, 117.0]), np.array([8.0])], label=9.0
),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[102.0, 103.0], [104.0, 105.0], [106.0, 107.0]],
[[104.0, 105.0], [106.0, 107.0], [108.0, 109.0]],
[[106.0, 107.0], [108.0, 109.0], [110.0, 111.0]],
[[108.0, 109.0], [110.0, 111.0], [112.0, 113.0]],
]
)
y1 = np.array(
[
[[1.0], [2.0], [3.0]],
[[2.0], [3.0], [4.0]],
[[3.0], [4.0], [5.0]],
[[4.0], [5.0], [6.0]],
]
)
lab1 = np.array([4.0, 5.0, 6.0, 7.0])
X2 = np.array(
[
[[110.0, 111.0], [112.0, 113.0], [114.0, 115.0]],
[[112.0, 113.0], [114.0, 115.0], [116.0, 117.0]],
]
)
y2 = np.array([[[5.0], [6.0], [7.0]], [[6.0], [7.0], [8.0]]])
lab2 = np.array([8.0, 9.0])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
temp1X = np.array(list(map(lambda x: np.stack(x), tmp1[:, :, 0])))
temp1y = np.array(list(map(lambda x: np.stack(x), tmp1[:, :, 1])))
tmp1lab = batch1.getLabelsAs("array")
res = [
np.array_equal(temp1X, X1),
np.array_equal(temp1y, y1),
np.array_equal(tmp1lab, lab1),
]
tmp2 = batch2.getFeaturesAs("array")
temp2X = np.array(list(map(lambda x: np.stack(x), tmp2[:, :, 0])))
temp2y = np.array(list(map(lambda x: np.stack(x), tmp2[:, :, 1])))
tmp2lab = batch2.getLabelsAs("array")
res = res + [
np.array_equal(temp2X, X2),
np.array_equal(temp2y, y2),
np.array_equal(tmp2lab, lab2),
]
self.assertTrue(all(res))
@logTest
def test_withLookback_ArrayFeatureSample(self):
samples = [
Sample(features=np.array([100, 101]), label=1),
Sample(features=np.array([102, 103]), label=2),
Sample(features=np.array([104, 105]), label=3),
Sample(features=np.array([106, 107]), label=4),
Sample(features=np.array([108, 109]), label=5),
Sample(features=np.array([110, 111]), label=6),
Sample(features=np.array([112, 113]), label=7),
Sample(features=np.array([114, 115]), label=8),
Sample(features=np.array([116, 117]), label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[100, 101], [102, 103], [104, 105]],
[[102, 103], [104, 105], [106, 107]],
[[104, 105], [106, 107], [108, 109]],
[[106, 107], [108, 109], [110, 111]],
]
)
lab1 = np.array([3, 4, 5, 6])
X2 = np.array(
[
[[108, 109], [110, 111], [112, 113]],
[[110, 111], [112, 113], [114, 115]],
[[112, 113], [114, 115], [116, 117]],
]
)
lab2 = np.array([7, 8, 9])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
tmp1lab = batch1.getLabelsAs("array")
res = [np.array_equal(tmp1, X1), np.array_equal(tmp1lab, lab1)]
tmp2 = batch2.getFeaturesAs("array")
tmp2lab = batch2.getLabelsAs("array")
res = res + [np.array_equal(tmp2, X2), np.array_equal(tmp2lab, lab2)]
self.assertTrue(all(res))
@logTest
def test_withLookback_ListFeatureSample(self):
samples = [
Sample(features=[100, 101], label=1),
Sample(features=[102, 103], label=2),
Sample(features=[104, 105], label=3),
Sample(features=[106, 107], label=4),
Sample(features=[108, 109], label=5),
Sample(features=[110, 111], label=6),
Sample(features=[112, 113], label=7),
Sample(features=[114, 115], label=8),
Sample(features=[116, 117], label=9),
]
def samples_gen():
for sample in samples:
if not any([np.isnan(x).any() for x in sample.features]):
yield sample
X1 = np.array(
[
[[100, 101], [102, 103], [104, 105]],
[[102, 103], [104, 105], [106, 107]],
[[104, 105], [106, 107], [108, 109]],
[[106, 107], [108, 109], [110, 111]],
]
)
lab1 = np.array([3, 4, 5, 6])
X2 = np.array(
[
[[108, 109], [110, 111], [112, 113]],
[[110, 111], [112, 113], [114, 115]],
[[112, 113], [114, 115], [116, 117]],
]
)
lab2 = np.array([7, 8, 9])
lookback = 3
batch_size = 4
lazyDat = LazyDataset(IterGenerator(samples_gen))
lookbackDat = lazyDat.withLookback(lookback)
batch_gen = lookbackDat.batch(batch_size)
batch1: CachedDataset = next(batch_gen)
batch2: CachedDataset = next(batch_gen)
tmp1 = batch1.getFeaturesAs("array")
tmp1lab = batch1.getLabelsAs("array")
res = [np.array_equal(tmp1, X1), np.array_equal(tmp1lab, lab1)]
tmp2 = batch2.getFeaturesAs("array")
tmp2lab = batch2.getLabelsAs("array")
res = res + [np.array_equal(tmp2, X2), np.array_equal(tmp2lab, lab2)]
self.assertTrue(all(res))
@logTest
def test_features_labels(self):
self.assertTrue(isinstance(lazyDat.features(), Generator))
self.assertTrue(isinstance(lazyDat.labels(), Generator))
self.assertTrue(isinstance(lazyDat.getFeaturesAs(), Generator))
self.assertTrue(isinstance(lazyDat.getLabelsAs(), Generator))
self.assertEqual(next(lazyDat.getFeaturesAs()), samples[0].features)
self.assertEqual(next(lazyDat.getLabelsAs()), samples[0].label)
self.assertEqual(next(lazyDat.features()), samples[0].features)
self.assertEqual(next(lazyDat.labels()), samples[0].label)
class CachedDatasetTests(TestCase):
@logTest
def test_to_df(self):
self.assertTrue(isinstance(CachedDataset(lazyDat).to_df(), pd.DataFrame))
self.assertTrue(
(
CachedDataset(lazyDat).to_df()["features"][0].values
== [100, 102, 104, 106, 108, 110, 112, 114, 116]
).all()
)
self.assertTrue(
(
CachedDataset(lazyDat).to_df()["labels"][0].values
== [1, 2, 3, 4, 5, 6, 7, 8, 9]
).all()
)
@logTest
def test_asPandasDataset(self):
self.assertTrue(
isinstance(CachedDataset(lazyDat).asPandasDataset, PandasDataset)
)
self.assertTrue(
(
CachedDataset(lazyDat).asPandasDataset.features[0].values
== [100, 102, 104, 106, 108, 110, 112, 114, 116]
).all()
)
self.assertTrue(
(
CachedDataset(lazyDat).asPandasDataset.labels[0].values
== [1, 2, 3, 4, 5, 6, 7, 8, 9]
).all()
)
class PandasDatasetTests(TestCase):
dataset: PandasDataset = PandasDataset(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=pd.Series([0, 0, 0, 1], name="Label"),
)
dataset_no_label: PandasDataset = PandasDataset(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
)
)
@logTest
def test_check_none(self):
self.assertEqual(self.dataset._check_none(None), None)
self.assertEqual(self.dataset._check_none("test"), "test")
@logTest
def test__len__(self):
self.assertEqual(self.dataset.__len__(), 4)
@logTest
def test_items(self):
self.assertTrue(isinstance(self.dataset.items, Iterator))
self.assertEqual(next(self.dataset.items).features, {"feat1": 1.0, "feat2": 1})
self.assertEqual(next(self.dataset.items).label["Label"], 0)
self.assertEqual(
next(self.dataset_no_label.items).features, {"feat1": 1.0, "feat2": 1}
)
self.assertEqual(next(self.dataset_no_label.items).label, None)
@logTest
def test_dropna_none_labels(self):
res = pd.concat(
[pd.Series([1, 2, 3], name="feat1"), pd.Series([1, 3, 4], name="feat2")],
axis=1,
)
self.assertTrue(
(
self.dataset.dropna(subset=["feat1"]).features.reset_index(drop=True)
== res
)
.all()
.all()
)
self.assertTrue(
(
self.dataset.dropna(feat__subset=["feat1"]).features.reset_index(
drop=True
)
== res
)
.all()
.all()
)
self.assertTrue(
(
self.dataset.dropna(labs__subset=["Label"]).features.reset_index(
drop=True
)
== res
)
.all()
.all()
)
@logTest
def test_cached(self):
self.assertTrue(self.dataset.cached)
@logTest
def test_features_labels(self):
self.assertEqual(
self.dataset.features,
pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
)
self.assertTrue((self.dataset.labels["Label"] == pd.Series([0, 0, 0, 1])).all())
@logTest
def test_index(self):
self.assertTrue((self.dataset.index == range(4)).all())
@logTest
def test_createObject(self):
self.assertTrue(
isinstance(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
),
PandasDataset,
)
)
self.assertEqual(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
).features,
self.dataset_no_label.features,
)
self.assertEqual(
PandasDataset.createObject(
features=pd.concat(
[
pd.Series([1, np.nan, 2, 3], name="feat1"),
pd.Series([1, 2, 3, 4], name="feat2"),
],
axis=1,
),
labels=None,
).labels,
self.dataset_no_label.labels,
)
@logTest
def test_take(self):
self.assertTrue(isinstance(self.dataset.takeAsPandas(1), PandasDataset))
self.assertEqual(
self.dataset.takeAsPandas(1).features.feat2, pd.Series([1], name="feat2")
)
self.assertEqual(
self.dataset.takeAsPandas(1).labels["Label"], pd.Series([0], name="Label")
)
@logTest
def test_loc(self):
self.assertEqual(self.dataset.loc(2).features[2]["feat1"], 2)
self.assertEqual(self.dataset.loc(2).features[2]["feat2"], 3)
self.assertEqual(self.dataset.loc(2).labels[2]["Label"], 0)
self.assertTrue(self.dataset_no_label.loc(2).labels is None)
@logTest
def test_from_sequence(self):
features_1 = pd.DataFrame(
{"feat1": [1, 2, 3, 4], "feat2": [100, 200, 300, 400]}, index=[1, 2, 3, 4]
)
features_2 = pd.DataFrame(
{"feat1": [9, 11, 13, 14], "feat2": [90, 110, 130, 140]},
index=[10, 11, 12, 13],
)
features_3 = pd.DataFrame(
{"feat1": [90, 10, 10, 1400], "feat2": [0.9, 0.11, 0.13, 0.14]},
index=[15, 16, 17, 18],
)
labels_1 = pd.DataFrame({"target": [1, 0, 1, 1]}, index=[1, 2, 3, 4])
labels_2 = pd.DataFrame({"target": [1, 1, 1, 0]}, index=[10, 11, 12, 13])
labels_3 = pd.DataFrame({"target": [0, 1, 1, 0]}, index=[15, 16, 17, 18])
dataset_1 = PandasDataset(features_1, labels_1)
dataset_2 = PandasDataset(features_2, labels_2)
dataset_3 = PandasDataset(features_3, labels_3)
dataset_merged = PandasDataset.from_sequence([dataset_1, dataset_2, dataset_3])
self.assertEqual(
pd.concat([features_1, features_2, features_3]), dataset_merged.features
)
self.assertEqual(
pd.concat([labels_1, labels_2, labels_3]), dataset_merged.labels
)
@logTest
def test_serialization(self):
filename = os.path.join(TMP_FOLDER, "my_dataset.p")
self.dataset.write(filename)
newDataset: PandasDataset = PandasDataset.load(filename)
self.assertTrue(isinstance(newDataset, PandasDataset))
self.assertTrue(
(self.dataset.features.fillna("NaN") == newDataset.features.fillna("NaN"))
.all()
.all()
)
@logTest
def test_creation_from_samples(self):
samples = [
Sample(features=[100, 101], label=1, name=1),
Sample(features=[102, 103], label=2, name=2),
Sample(features=[104, 105], label=1, name=3),
Sample(features=[106, 107], label=2, name=4),
Sample(features=[108, 109], label=2, name=5),
Sample(features=[110, 111], label=2, name=6),
Sample(features=[112, 113], label=1, name=7),
Sample(features=[114, 115], label=2, name=8),
Sample(features=[116, 117], label=2, name=9),
]
lazyDataset = CachedDataset(samples).filter(lambda x: x.label <= 5)
assert isinstance(lazyDataset, LazyDataset)
for format in ["pandas", "array", "dict"]:
features1 = lazyDataset.getFeaturesAs(format)
labels1 = lazyDataset.getLabelsAs(format)
cached: CachedDataset = lazyDataset.asCached
features2 = cached.getFeaturesAs(format)
labels2 = cached.getLabelsAs(format)
self.assertEqual(features1, features2)
self.assertEqual(labels1, labels2)
pandasDataset = cached.asPandasDataset
features3 = pandasDataset.getFeaturesAs(format)
labels3 = pandasDataset.getLabelsAs(format)
self.assertEqual(features1, features3)
self.assertEqual(labels1, labels3)
@logTest
def test_union(self):
union = self.dataset.union(
PandasDataset(
features=pd.concat(
[
pd.Series([np.nan, 5, 6, 7], name="feat1"),
pd.Series([7, 8, 9, 10], name="feat2"),
],
axis=1,
),
labels=pd.Series([0, 0, 0, 1], name="Label"),
)
)
self.assertTrue(isinstance(union, PandasDataset))
self.assertEqual(
union.features.reset_index(drop=True),
pd.concat(
[
pd.Series([1, np.nan, 2, 3, np.nan, 5, 6, 7], name="feat1"),
pd.Series([1, 2, 3, 4, 7, 8, 9, 10], name="feat2"),
],
axis=1,
),
)
self.assertEqual(
union.labels.Label.reset_index(drop=True),
pd.Series([0, 0, 0, 1, 0, 0, 0, 1], name="Label"),
)
@logTest
def test_intersection(self):
other = PandasDataset(
features=pd.concat(
[
                    pd.Series([1, 2, 3, 4], name="feat1"),
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from freezegun import freeze_time
from pandas import Timestamp
from pandas._testing import assert_frame_equal
from wetterdienst.exceptions import StartDateEndDateError
from wetterdienst.metadata.period import Period
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationPeriod,
DwdObservationResolution,
)
from wetterdienst.provider.dwd.observation.api import DwdObservationRequest
from wetterdienst.provider.dwd.observation.metadata.parameter import (
DwdObservationParameter,
)
from wetterdienst.settings import Settings
def test_dwd_observation_data_api():
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationParameter.DAILY.PRECIPITATION_HEIGHT],
resolution=Resolution.DAILY,
period=[Period.HISTORICAL, Period.RECENT],
start_date=None,
end_date=None,
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
@pytest.mark.remote
def test_dwd_observation_data_dataset():
"""Request a parameter set"""
expected = DwdObservationRequest(
parameter=["kl"],
resolution="daily",
period=["recent", "historical"],
).filter_by_station_id(station_id=(1,))
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert given == expected
expected = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
).filter_by_station_id(
station_id=(1,),
)
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert expected == given
assert expected.parameter == [
(
DwdObservationDataset.CLIMATE_SUMMARY,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
def test_dwd_observation_data_parameter():
"""Test parameter given as single value without dataset"""
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
request = DwdObservationRequest(
parameter=["climate_summary"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
def test_dwd_observation_data_parameter_dataset_pairs():
"""Test parameters given as parameter - dataset pair"""
request = DwdObservationRequest(
parameter=[("climate_summary", "climate_summary")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
request = DwdObservationRequest(
parameter=[("precipitation_height", "precipitation_more")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.PRECIPITATION_MORE.PRECIPITATION_HEIGHT,
DwdObservationDataset.PRECIPITATION_MORE,
)
]
@pytest.mark.remote
def test_dwd_observation_data_fails():
# station id
assert (
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
period=[DwdObservationPeriod.HISTORICAL],
resolution=DwdObservationResolution.DAILY,
)
.filter_by_station_id(
station_id=["test"],
)
.df.empty
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=["abc"],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_dwd_observation_data_dates():
# time input
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL],
end_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_request_period_historical():
# Historical period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
)
assert request.period == [
Period.HISTORICAL,
]
def test_request_period_historical_recent():
# Historical and recent period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(days=400),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
]
def test_request_period_historical_recent_now():
# Historical, recent and now period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
Period.NOW,
]
@freeze_time(datetime(2022, 1, 29, 1, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_recent_now():
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.RECENT, Period.NOW]
@freeze_time(datetime(2022, 1, 29, 2, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_now():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.NOW]
@freeze_time("2021-03-28T18:38:00+02:00")
def test_request_period_now_fixeddate():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert Period.NOW in request.period
def test_request_period_empty():
# No period (for example in future)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) + pd.Timedelta(days=720),
)
assert request.period == []
@pytest.mark.remote
def test_dwd_observation_data_result_missing_data():
"""Test for DataFrame having empty values for dates where the station should not
have values"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-27", # few days before official start
end_date="1934-01-04", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
# Leave only one column to potentially contain NaN which is VALUE
df = request.values.all().df.drop("quality", axis=1)
df_1933 = df[df["date"].dt.year == 1933]
df_1934 = df[df["date"].dt.year == 1934]
assert not df_1933.empty and df_1933.dropna().empty
assert not df_1934.empty and not df_1934.dropna().empty
request = DwdObservationRequest(
parameter=DwdObservationParameter.HOURLY.TEMPERATURE_AIR_MEAN_200,
resolution=DwdObservationResolution.HOURLY,
start_date="2020-06-09 12:00:00", # no data at this time (reason unknown)
end_date="2020-06-09 12:00:00",
).filter_by_station_id(
station_id=["03348"],
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["03348"]),
"dataset": pd.Categorical(["temperature_air"]),
"parameter": pd.Categorical(["temperature_air_mean_200"]),
"date": [datetime(2020, 6, 9, 12, 0, 0, tzinfo=pytz.UTC)],
"value": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
"quality": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular():
"""Test for actual values (tabular)"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 8.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 6.4], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 1008.60], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 0.5], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 0.7], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular_metric():
"""Test for actual values (tabular) in metric units"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 100.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 640.0], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 100860.0], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 273.65], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": | pd.to_numeric([pd.NA, 273.84999999999997], errors="coerce") | pandas.to_numeric |
########################################################################################
############### Imports ###############
########################################################################################
import tensorflow as tf
print(tf.__version__)
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.estimator import DNNClassifier
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras import regularizers
from sklearn.model_selection import KFold
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
import warnings
warnings.filterwarnings("ignore")
########################################################################################
############### Load Data ###############
########################################################################################
with tf.device('/GPU:0'):
    data = pd.read_csv('dataMATLAB/features_allrecordings.csv')
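    # Editor's sketch (not in the original script): the Keras imports above point toward
    # a small dense classifier; this is one plausible shape of such a model. The layer
    # sizes, input width, and class count are assumptions, since the CSV's columns are
    # not shown in this excerpt.
    def build_demo_classifier(n_features, n_classes):
        model = Sequential([
            Dense(128, activation='relu', input_shape=(n_features,)),
            Dropout(0.3),
            Dense(64, activation='relu'),
            Dense(n_classes, activation='softmax'),
        ])
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        return model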
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from matplotlib.backend_bases import MouseEvent
from matplotlib.widgets import SpanSelector
from scipy.signal import find_peaks
from scipy.signal import peak_widths
from scipy.optimize import curve_fit
class model:
def __init__(self):
#--- constants
self.c = 2.9979e8
self.h_bar = 6.582119569*1e-16
self.h = 4.1357*1e-15
#inialize boundaries and upload variables
self.x_lim_min = []
self.x_lim_max = []
self.A = []
self.wL = []
self.A_og = []
self.wL_og = []
self.my_file = ''
self.x_lim_min = []
self.x_lim_max = []
self.input_units = ''
self.output_units = ''
self.xlabel_str = ''
#initalize model variables
self.model_A = []
self.df = pd.DataFrame()
self.df_og = []
self.df_new = []
self.mode_df = []
self.e_inf = 1
self.fit_params = []
self.params = []
self.angle_of_incidence = []
self.crystal_index = []
#--- initalize interactive plotting functions
self._figure, self._axes, self._line,self._axes_2,self._dragging_point = None, None, None, None, None
self._points = {}
self.span = []
#--- setting experimental conditions ---
def set_angle_of_incidence(self, angle):
if not isinstance(angle, (int, float)):
raise TypeError('Angle must be numeric (int,float)')
else:
self.angle_of_incidence = angle
def set_crystal_index(self, index):
if not isinstance(index, (int, float, complex)):
raise TypeError('Index must be numeric (int,float, complex)')
else:
self.crystal_index = index
def set_n(self, index):
if not isinstance(index, (int, float, complex)):
raise TypeError('Index must be numeric (int,float, complex)')
else:
self.e_inf = index**2
#--- initiate data upload, auto peak find, and inital plot ---
def set_input_units(self, units):
self.input_units = units
self.output_units = units
def set_output_units(self, units):
self.output_units = units
if units == '1/cm':
self.xlabel_str = 'Wavenumber (cm$^-1$)'
if units == 'Hz':
self.xlabel_str = 'Frequency (Hz)'
if units == 'rad/s':
self.xlabel_str = 'Frequency (rad/s)'
if units == 'um':
self.xlabel_str = 'Wavelength ($\mu$m)'
if units == 'nm':
self.xlabel_str = 'Wavelength (nm)'
if units == 'm':
self.xlabel_str = 'Wavelength (m)'
if units == 'eV':
self.xlabel_str = 'Energy (eV)'
def upload(self, my_file= ''):
data = np.loadtxt(my_file)
self.wL_og = data[:,0]
self.A_og = data[:,1]
self.wL = data[:,0]
self.A = data[:,1]
self.wL_old = self.wL
self.wL = self.unit_conversion(self.wL, self.input_units, self.output_units)
self.x_lim_min = np.min([self.wL[0],self.wL[len(self.wL)-1]])
self.x_lim_max = np.max([self.wL[0], self.wL[len(self.wL)-1]])
self.wL_og = self.unit_conversion(self.wL_og, self.input_units, self.output_units)
self.auto_peak_find()
for ii in range(len(self.df['w0'])):
self._points[self.df['w0'][ii]]= self.df['height'][ii]
self._init_plot()
def set_range(self, x_lim_min, x_lim_max):
self.wL_old = self.wL
idx_lb = (abs(x_lim_min - self.wL_og)).argmin()
idx_ub = (abs(x_lim_max - self.wL_og)).argmin()
if idx_lb < idx_ub:
self.wL = self.wL_og[idx_lb:idx_ub]
self.A = self.A_og[idx_lb:idx_ub]
elif idx_ub < idx_lb:
a = idx_ub
b = idx_lb
idx_lb = a
idx_ub = b
self.wL = self.wL_og[idx_lb:idx_ub]
self.A = self.A_og[idx_lb:idx_ub]
self._points = {}
self.auto_peak_find()
for ii in range(len(self.df['w0'])):
self._points[self.df['w0'][ii]]= self.df['height'][ii]
self._update_plot()
self._axes_2.set_xlim(x_lim_min, x_lim_max)
plt.show()
def auto_peak_find(self):
self.df_new = []
peaks, _ = find_peaks(self.A, height=0.01)
width_parameters = peak_widths(self.A, peaks, rel_height=0.5)
widths = np.empty(len(peaks))
heights = np.empty(len(peaks))
pks = np.empty(len(peaks))
index = np.empty(len(peaks))
for ii in range(0, len(peaks)):
widths[ii] = np.abs(self.unit_conversion(self.wL[int(width_parameters[2][ii])], self.output_units, 'eV') - self.unit_conversion(self.wL[int(width_parameters[3][ii])], self.output_units, 'eV'))
heights[ii] = self.A[peaks[ii]]
pks[ii] = self.wL[peaks[ii]]
index[ii] = peaks[ii]
        self.df_new = pd.DataFrame({'index': index, 'w0': pks, 'height': heights, 'fwhm': widths})
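# Editor's sketch (not in the original module): the call order implied by the class,
# with a placeholder file name, units, and crystal parameters; it assumes the rest of
# the class (unit_conversion, _init_plot, ...) is available as in the full source.
def _demo_model_usage():
    m = model()
    m.set_input_units('1/cm')
    m.set_output_units('eV')
    m.set_angle_of_incidence(45)
    m.set_crystal_index(2.4)
    m.upload(my_file='spectrum.txt')   # two-column text file: frequency, absorbance
    return m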
import streamlit as st
import streamlit.components.v1 as components
import pandas as pd
import requests
from PIL import Image
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import geopandas as gpd
import geoplot as gplt
# Select box to switch between the two pages
view_picker = st.sidebar.selectbox('Change View', ('Local COVID-19 Cases Analysis',
"Risk Profile Dashboard",
'Risk Profile Survey',
"Daily Risk Profile Survey"
)
)
if view_picker == 'Risk Profile Survey':
components.iframe(
'https://docs.google.com/forms/d/e/1FAIpQLSdkgGD1FK7c6ZcGAQP4lavawr_yxczSdDAbpzXarZymPpJvLA/viewform?embedded=true',
scrolling=True,
height=800)
elif view_picker == 'Daily Risk Profile Survey':
components.iframe(
'https://docs.google.com/forms/d/e/1FAIpQLSe1RYfDpImWdoHulRn4uYVP5aLnfCxfTwyGBvsplZ4GFugfnQ/viewform?embedded=true',
scrolling=True,
height=800)
elif view_picker == 'Risk Profile Dashboard':
st.title('Risk Profile Dashboard')
components.iframe(
'https://public.tableau.com/views/Risk_Profile2_0/Dashboard1?:showVizHome=no&:embed=true',
scrolling=True,
height=900,
width=1000)
'''
# Daily
'''
components.iframe(
'https://public.tableau.com/views/New_Daily/DailyProfile?:showVizHome=no&:embed=true',
scrolling=True,
height=900,
width=1000)
elif view_picker == 'Local COVID-19 Cases Analysis':
st.title('Local COVID-19 Cases Analysis')
county_geo_json_url = "https://opendata.arcgis.com/datasets/a7887f1940b34bf5a02c6f7f27a5cb2c_0.geojson"
county_codes_url = 'https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json'
fl_demo_url = 'https://opendata.arcgis.com/datasets/61a30fb3ea4c43e4854fbb4c1be57394_0.geojson'
    def api_check(url):
        with requests.get(url) as response:
            if response.status_code != 200:
                st.error("API DOWN!")
                exit(0)
api_check(county_geo_json_url)
api_check(county_codes_url)
api_check(fl_demo_url)
@st.cache
def getData(api):
with requests.get(api) as data:
return data
@st.cache(allow_output_mutation=True)
def createGPD(api):
data = gpd.read_file(api)
if api is county_geo_json_url:
# County Data Preprocessing
data['COUNTY'] = data['COUNTY'].apply(lambda x: int(str('12') + str(x)))
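            # 12025 is the pre-1997 FIPS code for Dade County; the plotly county GeoJSON
            # uses the current Miami-Dade code 12086, so remap it here to match.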
data['COUNTY'] = data['COUNTY'].replace([12025], 12086)
return data
@st.cache
def make_map():
pass
# Getting the data
county_data = createGPD(county_geo_json_url)
counties = getData(county_codes_url).json()
fl_demo = createGPD(fl_demo_url)
# Environmental/Disease Data
fdoh_data = pd.read_csv('../data/fdoh-data.csv')
fdoh_data = fdoh_data.drop([0]).rename(columns={'Unnamed: 0': 'County', 'Unnamed: 1': "FIPS"})
df = county_data[['County_1', 'SHAPE_Length', 'SHAPE_Area', 'geometry']].rename(
columns={'County_1': 'County'})
fdoh_data = pd.merge(fdoh_data, df, on='County')
fdoh_data['FIPS'] = fdoh_data['FIPS'].apply(lambda x: x.replace('-', ''))
'''
# Objective
    Our goal as a team is to identify how students and FIU can partner to safely reopen
    the campus for classes.
    # Assumptions
    In our analysis we will look at county-level data to see how prevalent the risk factors
    announced by the CDC are in our community. Factors such as:
- Age
- Ethnicity
- Respiratory Diseases
- Risky Behavior
- Current Situation
'''
'''
# Age
    Most public health organizations have stated that age is a key indicator of how sick a patient may get from
    COVID-19. Age 60 and up is generally the range where COVID-19 poses a serious threat to life. '''
gplt.choropleth(
fl_demo, hue='TotalPopul',
cmap='Reds', figsize=(14, 5),
legend=True,
edgecolor='white'
)
plt.title('Total Population')
st.pyplot()
'''
    Dade and Broward are the most populated areas in Florida; as such, we would expect the spread to be
    centered around these two counties.
'''
gplt.choropleth(
fl_demo, hue='Pop_65andO',
cmap='Blues', figsize=(14, 5),
legend=True,
edgecolor='white'
)
plt.title('Population Over the Age of 65')
st.pyplot()
'''
    From this chart we can see that the two counties near FIU, Broward and Dade, also have the largest
    populations of people over the age of 65. One thing to note is that this data was collected
    five years ago.
'''
sns.barplot(x=['0-4', '5-14', '15-24', '25-34', '35-44', '45-54', '55-64', '65-74', '85plus'],
y=[county_data['Age_0_4'].sum(), county_data['Age_5_14'].sum(), county_data['Age_15_24'].sum(),
county_data['Age_25_34'].sum(), county_data['Age_35_44'].sum(), county_data['Age_45_54'].sum(),
county_data['Age_55_64'].sum(), county_data['Age_65_74'].sum(), county_data['Age_85plus'].sum()])
plt.title('Covid Cases by Age')
st.pyplot()
fig = px.choropleth(county_data, geojson=counties, locations='COUNTY', color='T_positive',
color_continuous_scale="Viridis", hover_name='COUNTYNAME',
hover_data=['Age_0_4', 'Age_5_14', 'Age_25_34', 'Age_35_44', 'Age_45_54', 'Age_55_64',
'Age_65_74', 'Age_85plus'],
scope="usa",
title="Positive Test Cases",
range_color=(0, 10000)
)
fig.update_geos(
projection={'scale': 4},
center={'lat': 27.6648, 'lon': -81.5158},
visible=False
)
st.plotly_chart(fig)
'''
    From the data we can see that the age group with the most positive COVID-19 tests is 25 and
    up, and, more importantly, that the area around FIU is a hot spot. Taking age into consideration,
    people older than 60 should remain in quarantine; with schools opening, we will see an influx of
    cases starting at a much younger age, which could have unforeseen consequences. The risk index for
    anyone above the age of 60 would be high, and younger individuals will need to be classified
    depending on their underlying health conditions.
    # Ethnicity
    According to the CDC, there is an inequality in the system which puts minorities at a
    higher risk of contracting the virus. This can be due to multiple factors such as discrimination,
    health care access, occupation, education, housing, or all of the above.
'''
minority = county_data['C_RaceBlack'] + county_data['C_HispanicYES'] + county_data['C_RaceOther']
sns.barplot(x=['White', 'Black', 'Hispanic', 'Other', 'Combined Minority'],
y=[county_data['C_RaceWhite'].sum(),
county_data['C_RaceBlack'].sum(), county_data['C_HispanicYES'].sum(),
county_data['C_RaceOther'].sum(), minority.sum()])
plt.title('COVID Cases by Race')
st.pyplot()
county_data['Minority'] = minority
fig = px.choropleth(county_data, geojson=counties, locations='COUNTY', color='Minority',
color_continuous_scale="Viridis", hover_name='COUNTYNAME',
hover_data=['T_positive'],
scope="usa",
title="Minority Cases",
range_color=(0, 20000)
)
fig.update_geos(
projection={'scale': 4},
center={'lat': 27.6648, 'lon': -81.5158},
visible=False
)
st.plotly_chart(fig)
'''
    We can see from the map that the number of minority cases near FIU is the highest in the state. With FIU
    serving primarily minorities, this puts our campus at a higher risk of becoming a hot spot for the
    spread of the virus, as we will be seeing students coming from Miami-Dade and Broward, the largest hot
    spots in Florida. This means we have to take the proper precautions as we reopen the campus.
    # Respiratory Diseases
    Various underlying health issues can significantly increase an individual's risk. We will
    take a look at respiratory disease as a baseline, since it is the condition that aligns most closely with
    the symptoms of the virus.
'''
fig = px.choropleth(fdoh_data, geojson=counties, locations='FIPS', color='Number of COPD Hospitalizations',
color_continuous_scale="Viridis", hover_name='County',
hover_data=[],
scope="usa",
title='Number of COPD Hospitalizations',
labels={'Number of COPD Hospitalizations': 'COPD Cases'}
)
fig.update_geos(
projection={'scale': 4},
center={'lat': 27.6648, 'lon': -81.5158},
visible=False
)
st.plotly_chart(fig)
st.write("*Chronic obstructive pulmonary disease (COPD)")
fig = px.choropleth(fdoh_data, geojson=counties, locations='FIPS',
color='Number of Asthma Emergency Department Visits',
color_continuous_scale="Viridis", hover_name='County',
hover_data=[],
scope="usa",
title='Number of Asthma Emergency Department Visits',
labels={'Number of Asthma Emergency Department Visits': 'Asthma Cases'}
)
fig.update_geos(
projection={'scale': 4},
center={'lat': 27.6648, 'lon': -81.5158},
visible=False
)
st.plotly_chart(fig)
fig = px.choropleth(fdoh_data, geojson=counties, locations='FIPS', color='Number of Asthma Hospitalizations',
color_continuous_scale="Viridis", hover_name='County',
hover_data=[],
scope="usa",
title='Number of Asthma Hospitalizations',
labels={'Number of Asthma Hospitalizations': 'Asthma Cases'}
)
fig.update_geos(
projection={'scale': 4},
center={'lat': 27.6648, 'lon': -81.5158},
visible=False
)
st.plotly_chart(fig)
'''
    The highest rates of respiratory illness in the 2018 data shown above are centered around the
    Dade and Broward areas. The number of ER visits should be viewed with some skepticism;
    however, looking at the hospitalizations, we can see that a person with a respiratory illness is
    likely to be in the Dade or Broward area, which significantly increases
    their COVID risk index.
'''
'''
# Risky Behaviors
We will be referencing this [report](https://www.gstatic.com/covid19/mobility/2020-07-27_US_Florida_Mobility_Report_en.pdf)
released by Google for this section.
    Taking a look at this report on how these two communities have coped with the virus,
    we can see that people are actively trying to avoid places such as transit stations and parks and
    are following mandated work-from-home policies. However, it is interesting that from the baseline we
    see only a minor decrease in retail & recreation. We know that the virus is spreading in these
    two communities, so we can infer that the hot spots for transmission are currently
    "retail & recreation" activities and "grocery & pharmacy" stores, after which the virus may spread within
    the household through increased contact.
'''
dade = Image.open('../image/dade-mobility.PNG')
broward = Image.open('../image/broward-mobility.PNG')
st.image(dade, caption='Dade Mobility Report',
use_column_width=True)
st.image(broward, caption='Broward Mobility Report',
use_column_width=True)
'''
# Current Situation
Below is a quick snapshot of the current COVID-19 data.
'''
st.info("Select \"State\" to view the entire State data")
fig = go.Figure()
fig = make_subplots(rows=3, cols=2,
specs=[[{'type': 'domain', 'colspan': 2}, None], [{'type': 'xy'}, {'type': 'xy'}],
[{'type': 'xy', 'colspan': 2}, None]],
subplot_titles=('', 'Testing Results', 'Cases by Race', "Median Age"))
# Multiselect box
county_picker = st.multiselect('Select County',
list(county_data['County_1'].sort_values()),
['Dade', 'Broward', 'Monroe', 'Collier']
)
if "State" in county_picker:
local_counties_county = pd.DataFrame()
selected = county_data['County_1'] != 'State'
local_counties_county = local_counties_county.append(county_data[selected], ignore_index=True)
else:
        local_counties_county = pd.DataFrame()
import pandas as pd
cards = pd.read_json("https://us-central1-tumbledmtg-website.cloudfunctions.net/api/cards")
def get_mana_value(decklists):
values = []
bodies = decklists['cards']
for body in bodies:
count = 0
mv = 0
for line in body.split("\n"):
if len(line) < 5:
continue
card = cards[cards['name'] == line.split(";")[1]]
if card['cmc'].size > 0 and "Land" not in str(card['type']):
mv += (int(card['cmc'].iloc[0]) * int(line.split(';')[0]))
count += int(line.split(';')[0])
values.append(mv/count)
return pd.Series(values)
def get_type(decklists, card_type):
lands = []
bodies = decklists['cards']
for body in bodies:
count = 0
lines = body.split("\n")
for line in lines:
if len(line) < 5:
continue
card = cards[cards['name'] == line.split(";")[1]]
if card_type in str(card['type']):
count += int(line.split(';')[0])
lands.append(count)
    return pd.Series(lands)
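# Editor's sketch (not in the original script): both helpers expect a DataFrame with a
# 'cards' column holding newline-separated "count;card name" entries; the deck below is
# a placeholder for illustration, and the lookups depend on the remote card list above.
def _demo_deck_stats():
    decklists = pd.DataFrame({'cards': ["4;Lightning Bolt\n20;Mountain\n"]})
    print('average mana value:', get_mana_value(decklists).iloc[0])
    print('creature count:', get_type(decklists, 'Creature').iloc[0])
    return decklists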
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Implement DataFrame public API as Pandas does.
Almost all docstrings for public and magic methods should be inherited from Pandas
for better maintainability. So some error codes are ignored in the pydocstyle check:
- D101: missing docstring in class
- D102: missing docstring in public method
- D105: missing docstring in magic method
Manually add documentation for methods which are not presented in pandas.
"""
import pandas
from pandas.core.common import apply_if_callable
from pandas.core.dtypes.common import (
infer_dtype_from_object,
is_dict_like,
is_list_like,
is_numeric_dtype,
)
from pandas.core.indexes.api import ensure_index_from_sequences
from pandas.util._validators import validate_bool_kwarg
from pandas.io.formats.printing import pprint_thing
from pandas._libs.lib import no_default
from pandas._typing import Label
import itertools
import functools
import numpy as np
import sys
from typing import Optional, Sequence, Tuple, Union, Mapping
import warnings
from modin.error_message import ErrorMessage
from modin.utils import _inherit_docstrings, to_pandas, hashable
from modin.config import IsExperimental
from .utils import (
from_pandas,
from_non_pandas,
)
from .iterator import PartitionIterator
from .series import Series
from .base import BasePandasDataset, _ATTRS_NO_LOOKUP
from .groupby import DataFrameGroupBy
from .accessor import CachedAccessor, SparseFrameAccessor
@_inherit_docstrings(pandas.DataFrame, excluded=[pandas.DataFrame.__init__])
class DataFrame(BasePandasDataset):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""
Distributed DataFrame object backed by Pandas dataframes.
Parameters
----------
data: NumPy ndarray (structured or homogeneous) or dict:
Dict can contain Series, arrays, constants, or list-like
objects.
index: pandas.Index, list, ObjectID
The row index for this DataFrame.
columns: pandas.Index
The column names for this DataFrame, in pandas Index object.
dtype: Data type to force.
            Only a single dtype is allowed. If None, the dtype is inferred.
copy: bool
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
query_compiler: query_compiler
A query compiler object to manage distributed computation.
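
        Examples
        --------
        Illustrative construction (added for documentation purposes; assumes a
        supported execution engine such as Ray or Dask is available):

        >>> import modin.pandas as mpd
        >>> df = mpd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
        >>> df.shape
        (3, 2)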
"""
if isinstance(data, (DataFrame, Series)):
self._query_compiler = data._query_compiler.copy()
if index is not None and any(i not in data.index for i in index):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if isinstance(data, Series):
# We set the column name if it is not in the provided Series
if data.name is None:
self.columns = [0] if columns is None else columns
# If the columns provided are not in the named Series, pandas clears
# the DataFrame and sets columns to the columns provided.
elif columns is not None and data.name not in columns:
self._query_compiler = from_pandas(
DataFrame(columns=columns)
)._query_compiler
if index is not None:
self._query_compiler = data.loc[index]._query_compiler
elif columns is None and index is None:
data._add_sibling(self)
else:
if columns is not None and any(i not in data.columns for i in columns):
raise NotImplementedError(
"Passing non-existant columns or index values to constructor not"
" yet implemented."
)
if index is None:
index = slice(None)
if columns is None:
columns = slice(None)
self._query_compiler = data.loc[index, columns]._query_compiler
# Check type of data and use appropriate constructor
elif query_compiler is None:
distributed_frame = from_non_pandas(data, index, columns, dtype)
if distributed_frame is not None:
self._query_compiler = distributed_frame._query_compiler
return
warnings.warn(
"Distributing {} object. This may take some time.".format(type(data))
)
if is_list_like(data) and not is_dict_like(data):
old_dtype = getattr(data, "dtype", None)
values = [
obj._to_pandas() if isinstance(obj, Series) else obj for obj in data
]
if isinstance(data, np.ndarray):
data = np.array(values, dtype=old_dtype)
else:
try:
data = type(data)(values, dtype=old_dtype)
except TypeError:
data = values
elif is_dict_like(data) and not isinstance(
data, (pandas.Series, Series, pandas.DataFrame, DataFrame)
):
data = {
k: v._to_pandas() if isinstance(v, Series) else v
for k, v in data.items()
}
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __repr__(self):
from pandas.io.formats import console
num_rows = pandas.get_option("display.max_rows") or 10
        num_cols = pandas.get_option("display.max_columns")
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
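
    # Illustrative call pattern for the helper above (an assumption, mirroring
    # how the multithreaded read tests exercise it):
    #   df = self.construct_dataframe(num_rows=10000)
    #   with tm.ensure_clean('__mt__.csv') as path:
    #       df.to_csv(path)
    #       result = self.generate_multithread_dataframe(path, 10000, num_tasks=4)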
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
            # we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
            # check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
        # stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=['Time', 'Price'],
parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
import plotly.plotly.plotly as py
import pandas as pd
import numpy as np
import json
from datetime import timedelta
def plot_function():
start_date = "2012-02-01"
end_date = "2012-03-01"
data_file = "D:/coursework/L4S2/GroupProject/repo/TeamFxPortal/static/data/" + "EURUSD" + "/DAT_MT_" + "EURUSD" + "_M1_" + \
"2012" + ".csv"
# news = ["Brexit","US presidential election 2012"]
# price
data = pd.read_csv(data_file)
data['Time'] = data[['Date', 'Time']].apply(lambda x: ' '.join(x), axis=1)
data['Time'] = data['Time'].apply(lambda x: pd.to_datetime(x))
"""
A warehouse for constant values required to initialize the PUDL Database.
This constants module stores and organizes a bunch of constant values which are
used throughout PUDL to populate static lists within the data packages or for
data cleaning purposes.
"""
import pandas as pd
import sqlalchemy as sa
######################################################################
# Constants used within the init.py module.
######################################################################
prime_movers = [
'steam_turbine',
'gas_turbine',
'hydro',
'internal_combustion',
'solar_pv',
'wind_turbine'
]
"""list: A list of the types of prime movers"""
rto_iso = {
'CAISO': 'California ISO',
'ERCOT': 'Electric Reliability Council of Texas',
'MISO': 'Midcontinent ISO',
'ISO-NE': 'ISO New England',
'NYISO': 'New York ISO',
'PJM': 'PJM Interconnection',
'SPP': 'Southwest Power Pool'
}
"""dict: A dictionary containing ISO/RTO abbreviations (keys) and names (values)
"""
us_states = {
'AK': 'Alaska',
'AL': 'Alabama',
'AR': 'Arkansas',
'AS': 'American Samoa',
'AZ': 'Arizona',
'CA': 'California',
'CO': 'Colorado',
'CT': 'Connecticut',
'DC': 'District of Columbia',
'DE': 'Delaware',
'FL': 'Florida',
'GA': 'Georgia',
'GU': 'Guam',
'HI': 'Hawaii',
'IA': 'Iowa',
'ID': 'Idaho',
'IL': 'Illinois',
'IN': 'Indiana',
'KS': 'Kansas',
'KY': 'Kentucky',
'LA': 'Louisiana',
'MA': 'Massachusetts',
'MD': 'Maryland',
'ME': 'Maine',
'MI': 'Michigan',
'MN': 'Minnesota',
'MO': 'Missouri',
'MP': 'Northern Mariana Islands',
'MS': 'Mississippi',
'MT': 'Montana',
'NA': 'National',
'NC': 'North Carolina',
'ND': 'North Dakota',
'NE': 'Nebraska',
'NH': 'New Hampshire',
'NJ': 'New Jersey',
'NM': 'New Mexico',
'NV': 'Nevada',
'NY': 'New York',
'OH': 'Ohio',
'OK': 'Oklahoma',
'OR': 'Oregon',
'PA': 'Pennsylvania',
'PR': 'Puerto Rico',
'RI': 'Rhode Island',
'SC': 'South Carolina',
'SD': 'South Dakota',
'TN': 'Tennessee',
'TX': 'Texas',
'UT': 'Utah',
'VA': 'Virginia',
'VI': 'Virgin Islands',
'VT': 'Vermont',
'WA': 'Washington',
'WI': 'Wisconsin',
'WV': 'West Virginia',
'WY': 'Wyoming'
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values)
"""
canada_prov_terr = {
'AB': 'Alberta',
'BC': 'British Columbia',
'CN': 'Canada',
'MB': 'Manitoba',
'NB': 'New Brunswick',
'NS': 'Nova Scotia',
'NL': 'Newfoundland and Labrador',
'NT': 'Northwest Territories',
'NU': 'Nunavut',
'ON': 'Ontario',
'PE': 'Prince Edwards Island',
'QC': 'Quebec',
'SK': 'Saskatchewan',
'YT': 'Yukon Territory',
}
"""dict: A dictionary containing Canadian provinces' and territories'
abbreviations (keys) and names (values)
"""
cems_states = {k: v for k, v in us_states.items() if v not in
{'Alaska',
'American Samoa',
'Guam',
'Hawaii',
'Northern Mariana Islands',
'National',
'Puerto Rico',
'Virgin Islands'}
}
"""dict: A dictionary containing US state abbreviations (keys) and names
(values) that are present in the CEMS dataset
"""
# This is imperfect for states that have split timezones. See:
# https://en.wikipedia.org/wiki/List_of_time_offsets_by_U.S._state_and_territory
# For states that are split, I went with where there seem to be more people
# List of timezones in pytz.common_timezones
# Canada: https://en.wikipedia.org/wiki/Time_in_Canada#IANA_time_zone_database
state_tz_approx = {
"AK": "US/Alaska", # Alaska; Not in CEMS
"AL": "US/Central", # Alabama
"AR": "US/Central", # Arkansas
"AS": "Pacific/Pago_Pago", # American Samoa; Not in CEMS
"AZ": "US/Arizona", # Arizona
"CA": "US/Pacific", # California
"CO": "US/Mountain", # Colorado
"CT": "US/Eastern", # Connecticut
"DC": "US/Eastern", # District of Columbia
"DE": "US/Eastern", # Delaware
"FL": "US/Eastern", # Florida (split state)
"GA": "US/Eastern", # Georgia
"GU": "Pacific/Guam", # Guam; Not in CEMS
"HI": "US/Hawaii", # Hawaii; Not in CEMS
"IA": "US/Central", # Iowa
"ID": "US/Mountain", # Idaho (split state)
"IL": "US/Central", # Illinois
"IN": "US/Eastern", # Indiana (split state)
"KS": "US/Central", # Kansas (split state)
"KY": "US/Eastern", # Kentucky (split state)
"LA": "US/Central", # Louisiana
"MA": "US/Eastern", # Massachusetts
"MD": "US/Eastern", # Maryland
"ME": "US/Eastern", # Maine
"MI": "America/Detroit", # Michigan (split state)
"MN": "US/Central", # Minnesota
"MO": "US/Central", # Missouri
"MP": "Pacific/Saipan", # Northern Mariana Islands; Not in CEMS
"MS": "US/Central", # Mississippi
"MT": "US/Mountain", # Montana
"NC": "US/Eastern", # North Carolina
"ND": "US/Central", # North Dakota (split state)
"NE": "US/Central", # Nebraska (split state)
"NH": "US/Eastern", # New Hampshire
"NJ": "US/Eastern", # New Jersey
"NM": "US/Mountain", # New Mexico
"NV": "US/Pacific", # Nevada
"NY": "US/Eastern", # New York
"OH": "US/Eastern", # Ohio
"OK": "US/Central", # Oklahoma
"OR": "US/Pacific", # Oregon (split state)
"PA": "US/Eastern", # Pennsylvania
"PR": "America/Puerto_Rico", # Puerto Rico; Not in CEMS
"RI": "US/Eastern", # Rhode Island
"SC": "US/Eastern", # South Carolina
"SD": "US/Central", # South Dakota (split state)
"TN": "US/Central", # Tennessee
"TX": "US/Central", # Texas
"UT": "US/Mountain", # Utah
"VA": "US/Eastern", # Virginia
"VI": "America/Puerto_Rico", # Virgin Islands; Not in CEMS
"VT": "US/Eastern", # Vermont
"WA": "US/Pacific", # Washington
"WI": "US/Central", # Wisconsin
"WV": "US/Eastern", # West Virginia
"WY": "US/Mountain", # Wyoming
# Canada (none of these are in CEMS)
"AB": "America/Edmonton", # Alberta
"BC": "America/Vancouver", # British Columbia (split province)
"MB": "America/Winnipeg", # Manitoba
"NB": "America/Moncton", # New Brunswick
"NS": "America/Halifax", # Nova Scotia
"NL": "America/St_Johns", # Newfoundland and Labrador (split province)
"NT": "America/Yellowknife", # Northwest Territories (split province)
"NU": "America/Iqaluit", # Nunavut (split province)
"ON": "America/Toronto", # Ontario (split province)
"PE": "America/Halifax", # Prince Edwards Island
"QC": "America/Montreal", # Quebec (split province)
"SK": "America/Regina", # Saskatchewan (split province)
"YT": "America/Whitehorse", # Yukon Territory
}
"""dict: A dictionary containing US and Canadian state/territory abbreviations
(keys) and timezones (values)
"""
ferc1_power_purchase_type = {
'RQ': 'requirement',
'LF': 'long_firm',
'IF': 'intermediate_firm',
'SF': 'short_firm',
'LU': 'long_unit',
'IU': 'intermediate_unit',
'EX': 'electricity_exchange',
'OS': 'other_service',
'AD': 'adjustment'
}
"""dict: A dictionary of abbreviations (keys) and types (values) for power
purchase agreements from FERC Form 1.
"""
# Dictionary mapping DBF files (w/o .DBF file extension) to DB table names
ferc1_dbf2tbl = {
'F1_1': 'f1_respondent_id',
'F1_2': 'f1_acb_epda',
'F1_3': 'f1_accumdepr_prvsn',
'F1_4': 'f1_accumdfrrdtaxcr',
'F1_5': 'f1_adit_190_detail',
'F1_6': 'f1_adit_190_notes',
'F1_7': 'f1_adit_amrt_prop',
'F1_8': 'f1_adit_other',
'F1_9': 'f1_adit_other_prop',
'F1_10': 'f1_allowances',
'F1_11': 'f1_bal_sheet_cr',
'F1_12': 'f1_capital_stock',
'F1_13': 'f1_cash_flow',
'F1_14': 'f1_cmmn_utlty_p_e',
'F1_15': 'f1_comp_balance_db',
'F1_16': 'f1_construction',
'F1_17': 'f1_control_respdnt',
'F1_18': 'f1_co_directors',
'F1_19': 'f1_cptl_stk_expns',
'F1_20': 'f1_csscslc_pcsircs',
'F1_21': 'f1_dacs_epda',
'F1_22': 'f1_dscnt_cptl_stk',
'F1_23': 'f1_edcfu_epda',
'F1_24': 'f1_elctrc_erg_acct',
'F1_25': 'f1_elctrc_oper_rev',
'F1_26': 'f1_elc_oper_rev_nb',
'F1_27': 'f1_elc_op_mnt_expn',
'F1_28': 'f1_electric',
'F1_29': 'f1_envrnmntl_expns',
'F1_30': 'f1_envrnmntl_fclty',
'F1_31': 'f1_fuel',
'F1_32': 'f1_general_info',
'F1_33': 'f1_gnrt_plant',
'F1_34': 'f1_important_chg',
'F1_35': 'f1_incm_stmnt_2',
'F1_36': 'f1_income_stmnt',
'F1_37': 'f1_miscgen_expnelc',
'F1_38': 'f1_misc_dfrrd_dr',
'F1_39': 'f1_mthly_peak_otpt',
'F1_40': 'f1_mtrl_spply',
'F1_41': 'f1_nbr_elc_deptemp',
'F1_42': 'f1_nonutility_prop',
'F1_43': 'f1_note_fin_stmnt', # 37% of DB
'F1_44': 'f1_nuclear_fuel',
'F1_45': 'f1_officers_co',
'F1_46': 'f1_othr_dfrrd_cr',
'F1_47': 'f1_othr_pd_in_cptl',
'F1_48': 'f1_othr_reg_assets',
'F1_49': 'f1_othr_reg_liab',
'F1_50': 'f1_overhead',
'F1_51': 'f1_pccidica',
'F1_52': 'f1_plant_in_srvce',
'F1_53': 'f1_pumped_storage',
'F1_54': 'f1_purchased_pwr',
'F1_55': 'f1_reconrpt_netinc',
'F1_56': 'f1_reg_comm_expn',
'F1_57': 'f1_respdnt_control',
'F1_58': 'f1_retained_erng',
'F1_59': 'f1_r_d_demo_actvty',
'F1_60': 'f1_sales_by_sched',
'F1_61': 'f1_sale_for_resale',
'F1_62': 'f1_sbsdry_totals',
'F1_63': 'f1_schedules_list',
'F1_64': 'f1_security_holder',
'F1_65': 'f1_slry_wg_dstrbtn',
'F1_66': 'f1_substations',
'F1_67': 'f1_taxacc_ppchrgyr',
'F1_68': 'f1_unrcvrd_cost',
'F1_69': 'f1_utltyplnt_smmry',
'F1_70': 'f1_work',
'F1_71': 'f1_xmssn_adds',
'F1_72': 'f1_xmssn_elc_bothr',
'F1_73': 'f1_xmssn_elc_fothr',
'F1_74': 'f1_xmssn_line',
'F1_75': 'f1_xtraordnry_loss',
'F1_76': 'f1_codes_val',
'F1_77': 'f1_sched_lit_tbl',
'F1_78': 'f1_audit_log',
'F1_79': 'f1_col_lit_tbl',
'F1_80': 'f1_load_file_names',
'F1_81': 'f1_privilege',
'F1_82': 'f1_sys_error_log',
'F1_83': 'f1_unique_num_val',
'F1_84': 'f1_row_lit_tbl',
'F1_85': 'f1_footnote_data',
'F1_86': 'f1_hydro',
'F1_87': 'f1_footnote_tbl', # 52% of DB
'F1_88': 'f1_ident_attsttn',
'F1_89': 'f1_steam',
'F1_90': 'f1_leased',
'F1_91': 'f1_sbsdry_detail',
'F1_92': 'f1_plant',
'F1_93': 'f1_long_term_debt',
'F1_106_2009': 'f1_106_2009',
'F1_106A_2009': 'f1_106a_2009',
'F1_106B_2009': 'f1_106b_2009',
'F1_208_ELC_DEP': 'f1_208_elc_dep',
'F1_231_TRN_STDYCST': 'f1_231_trn_stdycst',
'F1_324_ELC_EXPNS': 'f1_324_elc_expns',
'F1_325_ELC_CUST': 'f1_325_elc_cust',
'F1_331_TRANSISO': 'f1_331_transiso',
'F1_338_DEP_DEPL': 'f1_338_dep_depl',
'F1_397_ISORTO_STL': 'f1_397_isorto_stl',
'F1_398_ANCL_PS': 'f1_398_ancl_ps',
'F1_399_MTH_PEAK': 'f1_399_mth_peak',
'F1_400_SYS_PEAK': 'f1_400_sys_peak',
'F1_400A_ISO_PEAK': 'f1_400a_iso_peak',
'F1_429_TRANS_AFF': 'f1_429_trans_aff',
'F1_ALLOWANCES_NOX': 'f1_allowances_nox',
'F1_CMPINC_HEDGE_A': 'f1_cmpinc_hedge_a',
'F1_CMPINC_HEDGE': 'f1_cmpinc_hedge',
'F1_EMAIL': 'f1_email',
'F1_RG_TRN_SRV_REV': 'f1_rg_trn_srv_rev',
'F1_S0_CHECKS': 'f1_s0_checks',
'F1_S0_FILING_LOG': 'f1_s0_filing_log',
'F1_SECURITY': 'f1_security'
# 'F1_PINS': 'f1_pins', # private data, not publicized.
# 'F1_FREEZE': 'f1_freeze', # private data, not publicized
}
"""dict: A dictionary mapping FERC Form 1 DBF files(w / o .DBF file extension)
(keys) to database table names (values).
"""
ferc1_huge_tables = {
'f1_footnote_tbl',
'f1_footnote_data',
'f1_note_fin_stmnt',
}
"""set: A set containing large FERC Form 1 tables.
"""
# Invert the map above so we can go either way as needed
ferc1_tbl2dbf = {v: k for k, v in ferc1_dbf2tbl.items()}
"""dict: A dictionary mapping database table names (keys) to FERC Form 1 DBF
files (w/o the .DBF file extension) (values).
"""
# This dictionary maps the strings which are used to denote field types in the
# DBF objects to the corresponding generic SQLAlchemy Column types:
# These definitions come from a combination of the dbfread example program
# dbf2sqlite and this DBF file format documentation page:
# http://www.dbase.com/KnowledgeBase/int/db7_file_fmt.htm
# Un-mapped types left as 'XXX' which should obviously make an error...
dbf_typemap = {
'C': sa.String,
'D': sa.Date,
'F': sa.Float,
'I': sa.Integer,
'L': sa.Boolean,
'M': sa.Text, # 10 digit .DBT block number, stored as a string...
'N': sa.Float,
'T': sa.DateTime,
'0': sa.Integer, # based on dbf2sqlite mapping
'B': 'XXX', # .DBT block number, binary string
'@': 'XXX', # Timestamp... Date = Julian Day, Time is in milliseconds?
'+': 'XXX', # Autoincrement (e.g. for IDs)
'O': 'XXX', # Double, 8 bytes
'G': 'XXX', # OLE 10 digit/byte number of a .DBT block, stored as string
}
"""dict: A dictionary mapping field types in the DBF objects (keys) to the
corresponding generic SQLAlchemy Column types.
"""
# This is the set of tables which have been successfully integrated into PUDL:
ferc1_pudl_tables = (
'fuel_ferc1', # Plant-level data, linked to plants_steam_ferc1
'plants_steam_ferc1', # Plant-level data
'plants_small_ferc1', # Plant-level data
'plants_hydro_ferc1', # Plant-level data
'plants_pumped_storage_ferc1', # Plant-level data
'purchased_power_ferc1', # Inter-utility electricity transactions
'plant_in_service_ferc1', # Row-mapped plant accounting data.
# 'accumulated_depreciation_ferc1' # Requires row-mapping to be useful.
)
"""tuple: A tuple containing the FERC Form 1 tables that can be successfully
integrated into PUDL.
"""
table_map_ferc1_pudl = {
'fuel_ferc1': 'f1_fuel',
'plants_steam_ferc1': 'f1_steam',
'plants_small_ferc1': 'f1_gnrt_plant',
'plants_hydro_ferc1': 'f1_hydro',
'plants_pumped_storage_ferc1': 'f1_pumped_storage',
'plant_in_service_ferc1': 'f1_plant_in_srvce',
'purchased_power_ferc1': 'f1_purchased_pwr',
# 'accumulated_depreciation_ferc1': 'f1_accumdepr_prvsn'
}
"""dict: A dictionary mapping PUDL table names (keys) to the corresponding FERC
Form 1 DBF table names.
"""
# This is the list of EIA923 tables that can be successfully pulled into PUDL
eia923_pudl_tables = ('generation_fuel_eia923',
'boiler_fuel_eia923',
'generation_eia923',
'coalmine_eia923',
'fuel_receipts_costs_eia923')
"""tuple: A tuple containing the EIA923 tables that can be successfully
integrated into PUDL.
"""
epaipm_pudl_tables = (
'transmission_single_epaipm',
'transmission_joint_epaipm',
'load_curves_epaipm',
'plant_region_map_epaipm',
)
"""tuple: A tuple containing the EPA IPM tables that can be successfully
integrated into PUDL.
"""
# List of entity tables
entity_tables = ['utilities_entity_eia',
'plants_entity_eia',
'generators_entity_eia',
'boilers_entity_eia',
'regions_entity_epaipm', ]
"""list: A list of PUDL entity tables.
"""
xlsx_maps_pkg = 'pudl.package_data.meta.xlsx_maps'
"""string: The location of the xlsx maps within the PUDL package data."""
##############################################################################
# EIA 923 Spreadsheet Metadata
##############################################################################
##############################################################################
# EIA 860 Spreadsheet Metadata
##############################################################################
# This is the list of EIA860 tables that can be successfully pulled into PUDL
eia860_pudl_tables = (
'boiler_generator_assn_eia860',
'utilities_eia860',
'plants_eia860',
'generators_eia860',
'ownership_eia860'
)
"""tuple: A tuple enumerating EIA 860 tables for which PUDL's ETL works."""
# The set of FERC Form 1 tables that have the same composite primary keys: [
# respondent_id, report_year, report_prd, row_number, spplmnt_num ].
# TODO: THIS ONLY PERTAINS TO 2015 AND MAY NEED TO BE ADJUSTED BY YEAR...
ferc1_data_tables = (
'f1_acb_epda', 'f1_accumdepr_prvsn', 'f1_accumdfrrdtaxcr',
'f1_adit_190_detail', 'f1_adit_190_notes', 'f1_adit_amrt_prop',
'f1_adit_other', 'f1_adit_other_prop', 'f1_allowances', 'f1_bal_sheet_cr',
'f1_capital_stock', 'f1_cash_flow', 'f1_cmmn_utlty_p_e',
'f1_comp_balance_db', 'f1_construction', 'f1_control_respdnt',
'f1_co_directors', 'f1_cptl_stk_expns', 'f1_csscslc_pcsircs',
'f1_dacs_epda', 'f1_dscnt_cptl_stk', 'f1_edcfu_epda', 'f1_elctrc_erg_acct',
'f1_elctrc_oper_rev', 'f1_elc_oper_rev_nb', 'f1_elc_op_mnt_expn',
'f1_electric', 'f1_envrnmntl_expns', 'f1_envrnmntl_fclty', 'f1_fuel',
'f1_general_info', 'f1_gnrt_plant', 'f1_important_chg', 'f1_incm_stmnt_2',
'f1_income_stmnt', 'f1_miscgen_expnelc', 'f1_misc_dfrrd_dr',
'f1_mthly_peak_otpt', 'f1_mtrl_spply', 'f1_nbr_elc_deptemp',
'f1_nonutility_prop', 'f1_note_fin_stmnt', 'f1_nuclear_fuel',
'f1_officers_co', 'f1_othr_dfrrd_cr', 'f1_othr_pd_in_cptl',
'f1_othr_reg_assets', 'f1_othr_reg_liab', 'f1_overhead', 'f1_pccidica',
'f1_plant_in_srvce', 'f1_pumped_storage', 'f1_purchased_pwr',
'f1_reconrpt_netinc', 'f1_reg_comm_expn', 'f1_respdnt_control',
'f1_retained_erng', 'f1_r_d_demo_actvty', 'f1_sales_by_sched',
'f1_sale_for_resale', 'f1_sbsdry_totals', 'f1_schedules_list',
'f1_security_holder', 'f1_slry_wg_dstrbtn', 'f1_substations',
'f1_taxacc_ppchrgyr', 'f1_unrcvrd_cost', 'f1_utltyplnt_smmry', 'f1_work',
'f1_xmssn_adds', 'f1_xmssn_elc_bothr', 'f1_xmssn_elc_fothr',
'f1_xmssn_line', 'f1_xtraordnry_loss',
'f1_hydro', 'f1_steam', 'f1_leased', 'f1_sbsdry_detail',
'f1_plant', 'f1_long_term_debt', 'f1_106_2009', 'f1_106a_2009',
'f1_106b_2009', 'f1_208_elc_dep', 'f1_231_trn_stdycst', 'f1_324_elc_expns',
'f1_325_elc_cust', 'f1_331_transiso', 'f1_338_dep_depl',
'f1_397_isorto_stl', 'f1_398_ancl_ps', 'f1_399_mth_peak',
'f1_400_sys_peak', 'f1_400a_iso_peak', 'f1_429_trans_aff',
'f1_allowances_nox', 'f1_cmpinc_hedge_a', 'f1_cmpinc_hedge',
'f1_rg_trn_srv_rev')
"""tuple: A tuple containing the FERC Form 1 tables that have the same composite
primary keys: [respondent_id, report_year, report_prd, row_number,
spplmnt_num].
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 pages 204-207, Electric Plant in Service.
# Descriptions from: https://www.law.cornell.edu/cfr/text/18/part-101
ferc_electric_plant_accounts = pd.DataFrame.from_records([
# 1. Intangible Plant
(2, '301', 'Intangible: Organization'),
(3, '302', 'Intangible: Franchises and consents'),
(4, '303', 'Intangible: Miscellaneous intangible plant'),
(5, 'subtotal_intangible', 'Subtotal: Intangible Plant'),
# 2. Production Plant
# A. steam production
(8, '310', 'Steam production: Land and land rights'),
(9, '311', 'Steam production: Structures and improvements'),
(10, '312', 'Steam production: Boiler plant equipment'),
(11, '313', 'Steam production: Engines and engine-driven generators'),
(12, '314', 'Steam production: Turbogenerator units'),
(13, '315', 'Steam production: Accessory electric equipment'),
(14, '316', 'Steam production: Miscellaneous power plant equipment'),
(15, '317', 'Steam production: Asset retirement costs for steam production\
plant'),
(16, 'subtotal_steam_production', 'Subtotal: Steam Production Plant'),
# B. nuclear production
(18, '320', 'Nuclear production: Land and land rights (Major only)'),
(19, '321', 'Nuclear production: Structures and improvements (Major\
only)'),
(20, '322', 'Nuclear production: Reactor plant equipment (Major only)'),
(21, '323', 'Nuclear production: Turbogenerator units (Major only)'),
(22, '324', 'Nuclear production: Accessory electric equipment (Major\
only)'),
(23, '325', 'Nuclear production: Miscellaneous power plant equipment\
(Major only)'),
(24, '326', 'Nuclear production: Asset retirement costs for nuclear\
production plant (Major only)'),
(25, 'subtotal_nuclear_production', 'Subtotal: Nuclear Production Plant'),
# C. hydraulic production
(27, '330', 'Hydraulic production: Land and land rights'),
(28, '331', 'Hydraulic production: Structures and improvements'),
(29, '332', 'Hydraulic production: Reservoirs, dams, and waterways'),
(30, '333', 'Hydraulic production: Water wheels, turbines and generators'),
(31, '334', 'Hydraulic production: Accessory electric equipment'),
(32, '335', 'Hydraulic production: Miscellaneous power plant equipment'),
(33, '336', 'Hydraulic production: Roads, railroads and bridges'),
(34, '337', 'Hydraulic production: Asset retirement costs for hydraulic\
production plant'),
(35, 'subtotal_hydraulic_production', 'Subtotal: Hydraulic Production\
Plant'),
# D. other production
(37, '340', 'Other production: Land and land rights'),
(38, '341', 'Other production: Structures and improvements'),
(39, '342', 'Other production: Fuel holders, producers, and accessories'),
(40, '343', 'Other production: Prime movers'),
(41, '344', 'Other production: Generators'),
(42, '345', 'Other production: Accessory electric equipment'),
(43, '346', 'Other production: Miscellaneous power plant equipment'),
(44, '347', 'Other production: Asset retirement costs for other production\
plant'),
(None, '348', 'Other production: Energy Storage Equipment'),
(45, 'subtotal_other_production', 'Subtotal: Other Production Plant'),
(46, 'subtotal_production', 'Subtotal: Production Plant'),
# 3. Transmission Plant,
(48, '350', 'Transmission: Land and land rights'),
(None, '351', 'Transmission: Energy Storage Equipment'),
(49, '352', 'Transmission: Structures and improvements'),
(50, '353', 'Transmission: Station equipment'),
(51, '354', 'Transmission: Towers and fixtures'),
(52, '355', 'Transmission: Poles and fixtures'),
(53, '356', 'Transmission: Overhead conductors and devices'),
(54, '357', 'Transmission: Underground conduit'),
(55, '358', 'Transmission: Underground conductors and devices'),
(56, '359', 'Transmission: Roads and trails'),
(57, '359.1', 'Transmission: Asset retirement costs for transmission\
plant'),
(58, 'subtotal_transmission', 'Subtotal: Transmission Plant'),
# 4. Distribution Plant
(60, '360', 'Distribution: Land and land rights'),
(61, '361', 'Distribution: Structures and improvements'),
(62, '362', 'Distribution: Station equipment'),
(63, '363', 'Distribution: Storage battery equipment'),
(64, '364', 'Distribution: Poles, towers and fixtures'),
(65, '365', 'Distribution: Overhead conductors and devices'),
(66, '366', 'Distribution: Underground conduit'),
(67, '367', 'Distribution: Underground conductors and devices'),
(68, '368', 'Distribution: Line transformers'),
(69, '369', 'Distribution: Services'),
(70, '370', 'Distribution: Meters'),
(71, '371', 'Distribution: Installations on customers\' premises'),
(72, '372', 'Distribution: Leased property on customers\' premises'),
(73, '373', 'Distribution: Street lighting and signal systems'),
(74, '374', 'Distribution: Asset retirement costs for distribution plant'),
(75, 'subtotal_distribution', 'Subtotal: Distribution Plant'),
# 5. Regional Transmission and Market Operation Plant
(77, '380', 'Regional transmission: Land and land rights'),
(78, '381', 'Regional transmission: Structures and improvements'),
(79, '382', 'Regional transmission: Computer hardware'),
(80, '383', 'Regional transmission: Computer software'),
(81, '384', 'Regional transmission: Communication Equipment'),
(82, '385', 'Regional transmission: Miscellaneous Regional Transmission\
and Market Operation Plant'),
(83, '386', 'Regional transmission: Asset Retirement Costs for Regional\
Transmission and Market Operation\
Plant'),
(84, 'subtotal_regional_transmission', 'Subtotal: Transmission and Market\
Operation Plant'),
(None, '387', 'Regional transmission: [Reserved]'),
# 6. General Plant
(86, '389', 'General: Land and land rights'),
(87, '390', 'General: Structures and improvements'),
(88, '391', 'General: Office furniture and equipment'),
(89, '392', 'General: Transportation equipment'),
(90, '393', 'General: Stores equipment'),
(91, '394', 'General: Tools, shop and garage equipment'),
(92, '395', 'General: Laboratory equipment'),
(93, '396', 'General: Power operated equipment'),
(94, '397', 'General: Communication equipment'),
(95, '398', 'General: Miscellaneous equipment'),
(96, 'subtotal_general', 'Subtotal: General Plant'),
(97, '399', 'General: Other tangible property'),
(98, '399.1', 'General: Asset retirement costs for general plant'),
(99, 'total_general', 'TOTAL General Plant'),
(100, '101_and_106', 'Electric plant in service (Major only)'),
(101, '102_purchased', 'Electric plant purchased'),
(102, '102_sold', 'Electric plant sold'),
(103, '103', 'Experimental plant unclassified'),
(104, 'total_electric_plant', 'TOTAL Electric Plant in Service')],
columns=['row_number', 'ferc_account_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 pages 204 - 207, Electric Plant in
Service.
"""
# Line numbers, and corresponding FERC account number
# from FERC Form 1 page 219, ACCUMULATED PROVISION FOR DEPRECIATION
# OF ELECTRIC UTILITY PLANT (Account 108).
ferc_accumulated_depreciation = pd.DataFrame.from_records([
# Section A. Balances and Changes During Year
(1, 'balance_beginning_of_year', 'Balance Beginning of Year'),
(3, 'depreciation_expense', '(403) Depreciation Expense'),
(4, 'depreciation_expense_asset_retirement', \
'(403.1) Depreciation Expense for Asset Retirement Costs'),
(5, 'expense_electric_plant_leased_to_others', \
'(413) Exp. of Elec. Plt. Leas. to Others'),
(6, 'transportation_expenses_clearing',\
'Transportation Expenses-Clearing'),
(7, 'other_clearing_accounts', 'Other Clearing Accounts'),
(8, 'other_accounts_specified',\
'Other Accounts (Specify, details in footnote):'),
# blank: might also be other charges like line 17.
(9, 'other_charges', 'Other Charges:'),
(10, 'total_depreciation_provision_for_year',\
'TOTAL Deprec. Prov for Year (Enter Total of lines 3 thru 9)'),
(11, 'net_charges_for_plant_retired', 'Net Charges for Plant Retired:'),
(12, 'book_cost_of_plant_retired', 'Book Cost of Plant Retired'),
(13, 'cost_of_removal', 'Cost of Removal'),
(14, 'salvage_credit', 'Salvage (Credit)'),
(15, 'total_net_charges_for_plant_retired',\
'TOTAL Net Chrgs. for Plant Ret. (Enter Total of lines 12 thru 14)'),
(16, 'other_debit_or_credit_items',\
'Other Debit or Cr. Items (Describe, details in footnote):'),
# blank: can be "Other Charges", e.g. in 2012 for PSCo.
(17, 'other_charges_2', 'Other Charges 2'),
(18, 'book_cost_or_asset_retirement_costs_retired',\
'Book Cost or Asset Retirement Costs Retired'),
(19, 'balance_end_of_year', \
'Balance End of Year (Enter Totals of lines 1, 10, 15, 16, and 18)'),
# Section B. Balances at End of Year According to Functional Classification
(20, 'steam_production_end_of_year', 'Steam Production'),
(21, 'nuclear_production_end_of_year', 'Nuclear Production'),
(22, 'hydraulic_production_end_of_year',\
'Hydraulic Production-Conventional'),
(23, 'pumped_storage_end_of_year', 'Hydraulic Production-Pumped Storage'),
(24, 'other_production', 'Other Production'),
(25, 'transmission', 'Transmission'),
(26, 'distribution', 'Distribution'),
(27, 'regional_transmission_and_market_operation',
'Regional Transmission and Market Operation'),
(28, 'general', 'General'),
(29, 'total', 'TOTAL (Enter Total of lines 20 thru 28)')],
columns=['row_number', 'line_id', 'ferc_account_description'])
"""list: A list of tuples containing row numbers, FERC account IDs, and FERC
account descriptions from FERC Form 1 page 219, Accumulated Provision for
Depreciation of electric utility plant(Account 108).
"""
######################################################################
# Constants from EIA From 923 used within init.py module
######################################################################
# From Page 7 of EIA Form 923, Census Region a US state is located in
census_region = {
'NEW': 'New England',
'MAT': 'Middle Atlantic',
'SAT': 'South Atlantic',
'ESC': 'East South Central',
'WSC': 'West South Central',
'ENC': 'East North Central',
'WNC': 'West North Central',
'MTN': 'Mountain',
'PACC': 'Pacific Contiguous (OR, WA, CA)',
'PACN': 'Pacific Non-Contiguous (AK, HI)',
}
"""dict: A dictionary mapping Census Region abbreviations (keys) to Census
Region names (values).
"""
# From Page 7 of EIA Form923
# Static list of NERC (North American Electric Reliability Corporation)
# regions, used for where plant is located
nerc_region = {
'NPCC': 'Northeast Power Coordinating Council',
'ASCC': 'Alaska Systems Coordinating Council',
'HICC': 'Hawaiian Islands Coordinating Council',
'MRO': 'Midwest Reliability Organization',
'SERC': 'SERC Reliability Corporation',
'RFC': 'Reliability First Corporation',
'SPP': 'Southwest Power Pool',
'TRE': 'Texas Regional Entity',
'FRCC': 'Florida Reliability Coordinating Council',
'WECC': 'Western Electricity Coordinating Council'
}
"""dict: A dictionary mapping NERC Region abbreviations (keys) to NERC
Region names (values).
"""
# From Page 7 of EIA Form 923: EIA’s internal consolidated NAICS sectors.
# For internal purposes, EIA consolidates NAICS categories into seven groups.
sector_eia = {
# traditional regulated electric utilities
'1': 'Electric Utility',
# Independent power producers which are not cogenerators
'2': 'NAICS-22 Non-Cogen',
# Independent power producers which are cogenerators, but whose
# primary business purpose is the sale of electricity to the public
'3': 'NAICS-22 Cogen',
# Commercial non-cogeneration facilities that produce electric power,
# are connected to the grid, and can sell power to the public
'4': 'Commercial NAICS Non-Cogen',
# Commercial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'5': 'Commercial NAICS Cogen',
# Industrial non-cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'6': 'Industrial NAICS Non-Cogen',
# Industrial cogeneration facilities that produce electric power, are
# connected to the grid, and can sell power to the public
'7': 'Industrial NAICS Cogen'
}
"""dict: A dictionary mapping EIA numeric codes (keys) to EIA’s internal
consolidated NAICS sectors (values).
"""
# EIA 923: EIA Type of prime mover:
prime_movers_eia923 = {
'BA': 'Energy Storage, Battery',
'BT': 'Turbines Used in a Binary Cycle. Including those used for geothermal applications',
'CA': 'Combined-Cycle -- Steam Part',
'CC': 'Combined-Cycle, Total Unit',
'CE': 'Energy Storage, Compressed Air',
'CP': 'Energy Storage, Concentrated Solar Power',
'CS': 'Combined-Cycle Single-Shaft Combustion Turbine and Steam Turbine share of single',
'CT': 'Combined-Cycle Combustion Turbine Part',
'ES': 'Energy Storage, Other (Specify on Schedule 9, Comments)',
'FC': 'Fuel Cell',
'FW': 'Energy Storage, Flywheel',
'GT': 'Combustion (Gas) Turbine. Including Jet Engine design',
'HA': 'Hydrokinetic, Axial Flow Turbine',
'HB': 'Hydrokinetic, Wave Buoy',
'HK': 'Hydrokinetic, Other',
'HY': 'Hydraulic Turbine. Including turbines associated with delivery of water by pipeline.',
'IC': 'Internal Combustion (diesel, piston, reciprocating) Engine',
'PS': 'Energy Storage, Reversible Hydraulic Turbine (Pumped Storage)',
'OT': 'Other',
'ST': 'Steam Turbine. Including Nuclear, Geothermal, and Solar Steam (does not include Combined Cycle).',
'PV': 'Photovoltaic',
'WT': 'Wind Turbine, Onshore',
'WS': 'Wind Turbine, Offshore'
}
"""dict: A dictionary mapping EIA 923 prime mover codes (keys) and prime mover
names / descriptions (values).
"""
# EIA 923: The fuel code reported to EIA. Two or three letter alphanumeric:
fuel_type_eia923 = {
'AB': 'Agricultural By-Products',
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BIT': 'Bituminous Coal',
'BLQ': 'Black Liquor',
'CBL': 'Coal, Blended',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'GEO': 'Geothermal',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LFG': 'Landfill Gas',
'LIG': 'Lignite Coal',
'MSB': 'Biogenic Municipal Solid Waste',
'MSN': 'Non-biogenic Municipal Solid Waste',
'MSW': 'Municipal Solid Waste',
'MWH': 'Electricity used for energy storage',
'NG': 'Natural Gas',
'NUC': 'Nuclear. Including Uranium, Plutonium, and Thorium.',
'OBG': 'Other Biomass Gas. Including digester gas, methane, and other biomass gases.',
'OBL': 'Other Biomass Liquids',
'OBS': 'Other Biomass Solids',
'OG': 'Other Gas',
'OTH': 'Other Fuel',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'PUR': 'Purchased Steam',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SGC': 'Coal-Derived Synthesis Gas',
'SGP': 'Synthesis Gas from Petroleum Coke',
'SLW': 'Sludge Waste',
'SUB': 'Subbituminous Coal',
'SUN': 'Solar',
'TDF': 'Tire-derived Fuels',
'WAT': 'Water at a Conventional Hydroelectric Turbine and water used in Wave Buoy Hydrokinetic Technology, current Hydrokinetic Technology, Tidal Hydrokinetic Technology, and Pumping Energy for Reversible (Pumped Storage) Hydroelectric Turbines.',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WDL': 'Wood Waste Liquids, excluding Black Liquor. Including red liquor, sludge wood, spent sulfite liquor, and other wood-based liquids.',
'WDS': 'Wood/Wood Waste Solids. Including paper pellets, railroad ties, utility poles, wood chips, bark, and other wood waste solids.',
'WH': 'Waste Heat not directly attributed to a fuel source',
'WND': 'Wind',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.'
}
"""dict: A dictionary mapping EIA 923 fuel type codes (keys) and fuel type
names / descriptions (values).
"""
# Fuel type strings for EIA 923 generator fuel table
fuel_type_eia923_gen_fuel_coal_strings = [
'ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with coal fuel.
"""
fuel_type_eia923_gen_fuel_oil_strings = [
'dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: The list of EIA 923 Generation Fuel strings associated with oil fuel.
"""
fuel_type_eia923_gen_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: The list of EIA 923 Generation Fuel strings associated with gas fuel.
"""
fuel_type_eia923_gen_fuel_solar_strings = ['sun', ]
"""list: The list of EIA 923 Generation Fuel strings associated with solar
power.
"""
fuel_type_eia923_gen_fuel_wind_strings = ['wnd', ]
"""list: The list of EIA 923 Generation Fuel strings associated with wind
power.
"""
fuel_type_eia923_gen_fuel_hydro_strings = ['wat', ]
"""list: The list of EIA 923 Generation Fuel strings associated with hydro
power.
"""
fuel_type_eia923_gen_fuel_nuclear_strings = ['nuc', ]
"""list: The list of EIA 923 Generation Fuel strings associated with nuclear
power.
"""
fuel_type_eia923_gen_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'msw', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds']
"""list: The list of EIA 923 Generation Fuel strings associated with solid waste
fuel.
"""
fuel_type_eia923_gen_fuel_other_strings = ['geo', 'mwh', 'oth', 'pur', 'wh', ]
"""list: The list of EIA 923 Generation Fuel strings associated with geothermal
power.
"""
fuel_type_eia923_gen_fuel_simple_map = {
'coal': fuel_type_eia923_gen_fuel_coal_strings,
'oil': fuel_type_eia923_gen_fuel_oil_strings,
'gas': fuel_type_eia923_gen_fuel_gas_strings,
'solar': fuel_type_eia923_gen_fuel_solar_strings,
'wind': fuel_type_eia923_gen_fuel_wind_strings,
'hydro': fuel_type_eia923_gen_fuel_hydro_strings,
'nuclear': fuel_type_eia923_gen_fuel_nuclear_strings,
'waste': fuel_type_eia923_gen_fuel_waste_strings,
'other': fuel_type_eia923_gen_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Generation Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# Fuel type strings for EIA 923 boiler fuel table
fuel_type_eia923_boiler_fuel_coal_strings = [
'ant', 'bit', 'lig', 'pc', 'rc', 'sc', 'sub', 'wc', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
coal.
"""
fuel_type_eia923_boiler_fuel_oil_strings = ['dfo', 'rfo', 'wo', 'jf', 'ker', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
oil.
"""
fuel_type_eia923_boiler_fuel_gas_strings = [
'bfg', 'lfg', 'ng', 'og', 'obg', 'pg', 'sgc', 'sgp', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
gas.
"""
fuel_type_eia923_boiler_fuel_waste_strings = [
'ab', 'blq', 'msb', 'msn', 'obl', 'obs', 'slw', 'tdf', 'wdl', 'wds', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
waste.
"""
fuel_type_eia923_boiler_fuel_other_strings = ['oth', 'pur', 'wh', ]
"""list: A list of strings from EIA 923 Boiler Fuel associated with fuel type
other.
"""
fuel_type_eia923_boiler_fuel_simple_map = {
'coal': fuel_type_eia923_boiler_fuel_coal_strings,
'oil': fuel_type_eia923_boiler_fuel_oil_strings,
'gas': fuel_type_eia923_boiler_fuel_gas_strings,
'waste': fuel_type_eia923_boiler_fuel_waste_strings,
'other': fuel_type_eia923_boiler_fuel_other_strings,
}
"""dict: A dictionary mapping EIA 923 Boiler Fuel fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# PUDL consolidation of EIA923 AER fuel type strings into same categories as
# 'energy_source_eia923' plus additional renewable and nuclear categories.
# These classifications are not currently used, as the EIA fuel type and energy
# source designations provide more detailed information.
aer_coal_strings = ['col', 'woc', 'pc']
"""list: A list of EIA 923 AER fuel type strings associated with coal.
"""
aer_gas_strings = ['mlg', 'ng', 'oog']
"""list: A list of EIA 923 AER fuel type strings associated with gas.
"""
aer_oil_strings = ['dfo', 'rfo', 'woo']
"""list: A list of EIA 923 AER fuel type strings associated with oil.
"""
aer_solar_strings = ['sun']
"""list: A list of EIA 923 AER fuel type strings associated with solar power.
"""
aer_wind_strings = ['wnd']
"""list: A list of EIA 923 AER fuel type strings associated with wind power.
"""
aer_hydro_strings = ['hps', 'hyc']
"""list: A list of EIA 923 AER fuel type strings associated with hydro power.
"""
aer_nuclear_strings = ['nuc']
"""list: A list of EIA 923 AER fuel type strings associated with nuclear power.
"""
aer_waste_strings = ['www']
"""list: A list of EIA 923 AER fuel type strings associated with waste.
"""
aer_other_strings = ['geo', 'orw', 'oth']
"""list: A list of EIA 923 AER fuel type strings associated with other fuel.
"""
aer_fuel_type_strings = {
'coal': aer_coal_strings,
'gas': aer_gas_strings,
'oil': aer_oil_strings,
'solar': aer_solar_strings,
'wind': aer_wind_strings,
'hydro': aer_hydro_strings,
'nuclear': aer_nuclear_strings,
'waste': aer_waste_strings,
'other': aer_other_strings
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923: A partial aggregation of the reported fuel type codes into
# larger categories used by EIA in, for example,
# the Annual Energy Review (AER). Two or three letter alphanumeric.
# See the Fuel Code table (Table 5), below:
fuel_type_aer_eia923 = {
'SUN': 'Solar PV and thermal',
'COL': 'Coal',
'DFO': 'Distillate Petroleum',
'GEO': 'Geothermal',
'HPS': 'Hydroelectric Pumped Storage',
'HYC': 'Hydroelectric Conventional',
'MLG': 'Biogenic Municipal Solid Waste and Landfill Gas',
'NG': 'Natural Gas',
'NUC': 'Nuclear',
'OOG': 'Other Gases',
'ORW': 'Other Renewables',
'OTH': 'Other (including nonbiogenic MSW)',
'PC': 'Petroleum Coke',
'RFO': 'Residual Petroleum',
'WND': 'Wind',
'WOC': 'Waste Coal',
'WOO': 'Waste Oil',
'WWW': 'Wood and Wood Waste'
}
"""dict: A dictionary mapping EIA 923 AER fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
fuel_type_eia860_coal_strings = ['ant', 'bit', 'cbl', 'lig', 'pc', 'rc', 'sc',
'sub', 'wc', 'coal', 'petroleum coke', 'col',
'woc']
"""list: A list of strings from EIA 860 associated with fuel type coal.
"""
fuel_type_eia860_oil_strings = ['dfo', 'jf', 'ker', 'rfo', 'wo', 'woo',
'petroleum']
"""list: A list of strings from EIA 860 associated with fuel type oil.
"""
fuel_type_eia860_gas_strings = ['bfg', 'lfg', 'mlg', 'ng', 'obg', 'og', 'pg',
'sgc', 'sgp', 'natural gas', 'other gas',
'oog', 'sg']
"""list: A list of strings from EIA 860 associated with fuel type gas.
"""
fuel_type_eia860_solar_strings = ['sun', 'solar']
"""list: A list of strings from EIA 860 associated with solar power.
"""
fuel_type_eia860_wind_strings = ['wnd', 'wind', 'wt']
"""list: A list of strings from EIA 860 associated with wind power.
"""
fuel_type_eia860_hydro_strings = ['wat', 'hyc', 'hps', 'hydro']
"""list: A list of strings from EIA 860 associated with hydro power.
"""
fuel_type_eia860_nuclear_strings = ['nuc', 'nuclear']
"""list: A list of strings from EIA 860 associated with nuclear power.
"""
fuel_type_eia860_waste_strings = ['ab', 'blq', 'bm', 'msb', 'msn', 'obl',
'obs', 'slw', 'tdf', 'wdl', 'wds', 'biomass',
'msw', 'www']
"""list: A list of strings from EIA 860 associated with fuel type waste.
"""
fuel_type_eia860_other_strings = ['mwh', 'oth', 'pur', 'wh', 'geo', 'none',
'orw', 'other']
"""list: A list of strings from EIA 860 associated with fuel type other.
"""
fuel_type_eia860_simple_map = {
'coal': fuel_type_eia860_coal_strings,
'oil': fuel_type_eia860_oil_strings,
'gas': fuel_type_eia860_gas_strings,
'solar': fuel_type_eia860_solar_strings,
'wind': fuel_type_eia860_wind_strings,
'hydro': fuel_type_eia860_hydro_strings,
'nuclear': fuel_type_eia860_nuclear_strings,
'waste': fuel_type_eia860_waste_strings,
'other': fuel_type_eia860_other_strings,
}
"""dict: A dictionary mapping EIA 860 fuel types (keys) to lists
of strings associated with that fuel type (values).
"""
# EIA 923/860: Lumping of energy source categories.
energy_source_eia_simple_map = {
'coal': ['ANT', 'BIT', 'LIG', 'PC', 'SUB', 'WC', 'RC'],
'oil': ['DFO', 'JF', 'KER', 'RFO', 'WO'],
'gas': ['BFG', 'LFG', 'NG', 'OBG', 'OG', 'PG', 'SG', 'SGC', 'SGP'],
'solar': ['SUN'],
'wind': ['WND'],
'hydro': ['WAT'],
'nuclear': ['NUC'],
'waste': ['AB', 'BLQ', 'MSW', 'OBL', 'OBS', 'SLW', 'TDF', 'WDL', 'WDS'],
'other': ['GEO', 'MWH', 'OTH', 'PUR', 'WH']
}
"""dict: A dictionary mapping EIA fuel types (keys) to fuel codes (values).
"""
fuel_group_eia923_simple_map = {
'coal': ['coal', 'petroleum coke'],
'oil': ['petroleum'],
'gas': ['natural gas', 'other gas']
}
"""dict: A dictionary mapping EIA 923 simple fuel types("oil", "coal", "gas")
(keys) to fuel types (values).
"""
# EIA 923: The type of physical units fuel consumption is reported in.
# All consumption is reported in short tons for solids,
# thousands of cubic feet for gases, and barrels for liquids.
fuel_units_eia923 = {
'mcf': 'Thousands of cubic feet (for gases)',
'short_tons': 'Short tons (for solids)',
'barrels': 'Barrels (for liquids)'
}
"""dict: A dictionary mapping EIA 923 fuel units (keys) to fuel unit
descriptions (values).
"""
# EIA 923: Designates the purchase type under which receipts occurred
# in the reporting month. One or two character alphanumeric:
contract_type_eia923 = {
'C': 'Contract - Fuel received under a purchase order or contract with a term of one year or longer. Contracts with a shorter term are considered spot purchases ',
'NC': 'New Contract - Fuel received under a purchase order or contract with duration of one year or longer, under which deliveries were first made during the reporting month',
'N': 'New Contract - see NC code. This abbreviation existed only in 2008 before being replaced by NC.',
'S': 'Spot Purchase',
'T': 'Tolling Agreement – Fuel received under a tolling agreement (bartering arrangement of fuel for generation)'
}
"""dict: A dictionary mapping EIA 923 contract codes (keys) to contract
descriptions (values) for each month in the Fuel Receipts and Costs table.
"""
# EIA 923: The fuel code associated with the fuel receipt.
# Defined on Page 7 of EIA Form 923
# Two or three character alphanumeric:
energy_source_eia923 = {
'ANT': 'Anthracite Coal',
'BFG': 'Blast Furnace Gas',
'BM': 'Biomass',
'BIT': 'Bituminous Coal',
'DFO': 'Distillate Fuel Oil. Including diesel, No. 1, No. 2, and No. 4 fuel oils.',
'JF': 'Jet Fuel',
'KER': 'Kerosene',
'LIG': 'Lignite Coal',
'NG': 'Natural Gas',
'PC': 'Petroleum Coke',
'PG': 'Gaseous Propane',
'OG': 'Other Gas',
'RC': 'Refined Coal',
'RFO': 'Residual Fuel Oil. Including No. 5 & 6 fuel oils and bunker C fuel oil.',
'SG': 'Synthesis Gas from Petroleum Coke',
'SGP': 'Petroleum Coke Derived Synthesis Gas',
'SC': 'Coal-based Synfuel. Including briquettes, pellets, or extrusions, which are formed by binding materials or processes that recycle materials.',
'SUB': 'Subbituminous Coal',
'WC': 'Waste/Other Coal. Including anthracite culm, bituminous gob, fine coal, lignite waste, waste coal.',
'WO': 'Waste/Other Oil. Including crude oil, liquid butane, liquid propane, naphtha, oil waste, re-refined motor oil, sludge oil, tar oil, or other petroleum-based liquid wastes.',
}
"""dict: A dictionary mapping fuel codes (keys) to fuel descriptions (values)
for each fuel receipt from the EIA 923 Fuel Receipts and Costs table.
"""
# EIA 923 Fuel Group, from Page 7 EIA Form 923
# Groups fossil fuel energy sources into fuel groups that are located in the
# Electric Power Monthly: Coal, Natural Gas, Petroleum, Petroleum Coke.
fuel_group_eia923 = (
'coal',
'natural_gas',
'petroleum',
'petroleum_coke',
'other_gas'
)
"""tuple: A tuple containing EIA 923 fuel groups.
"""
# EIA 923: Type of Coal Mine as defined on Page 7 of EIA Form 923
coalmine_type_eia923 = {
'P': 'Preparation Plant',
'S': 'Surface',
'U': 'Underground',
'US': 'Both an underground and surface mine with most coal extracted from underground',
'SU': 'Both an underground and surface mine with most coal extracted from surface',
}
"""dict: A dictionary mapping EIA 923 coal mine type codes (keys) to
descriptions (values).
"""
# EIA 923: State abbreviation related to coal mine location.
# Country abbreviations are also used in this category, but they are
# non-standard because of collisions with US state names. Instead of using
# the provided non-standard names, we convert to ISO-3166-1 three letter
# country codes https://en.wikipedia.org/wiki/ISO_3166-1_alpha-3
coalmine_country_eia923 = {
'AU': 'AUS', # Australia
'CL': 'COL', # Colombia
'CN': 'CAN', # Canada
'IS': 'IDN', # Indonesia
'PL': 'POL', # Poland
'RS': 'RUS', # Russia
'UK': 'GBR', # United Kingdom of Great Britain
'VZ': 'VEN', # Venezuela
'OT': 'other_country',
'IM': 'unknown'
}
"""dict: A dictionary mapping coal mine country codes (keys) to ISO-3166-1 three
letter country codes (values).
"""
# EIA 923: Mode for the longest / second longest distance.
transport_modes_eia923 = {
'RR': 'Rail: Shipments of fuel moved to consumers by rail \
(private or public/commercial). Included is coal hauled to or \
away from a railroad siding by truck if the truck did not use public\
roads.',
'RV': 'River: Shipments of fuel moved to consumers via river by barge. \
Not included are shipments to Great Lakes coal loading docks, \
tidewater piers, or coastal ports.',
'GL': 'Great Lakes: Shipments of coal moved to consumers via \
the Great Lakes. These shipments are moved via the Great Lakes \
coal loading docks, which are identified by name and location as \
follows: Conneaut Coal Storage & Transfer, Conneaut, Ohio; \
NS Coal Dock (Ashtabula Coal Dock), Ashtabula, Ohio; \
Sandusky Coal Pier, Sandusky, Ohio; Toledo Docks, Toledo, Ohio; \
KCBX Terminals Inc., Chicago, Illinois; \
Superior Midwest Energy Terminal, Superior, Wisconsin',
'TP': 'Tidewater Piers and Coastal Ports: Shipments of coal moved to \
Tidewater Piers and Coastal Ports for further shipments to consumers \
via coastal water or ocean. The Tidewater Piers and Coastal Ports \
are identified by name and location as follows: Dominion Terminal \
Associates, Newport News, Virginia; McDuffie Coal Terminal, Mobile, \
Alabama; IC Railmarine Terminal, Convent, Louisiana; \
International Marine Terminals, Myrtle Grove, Louisiana; \
Cooper/T. Smith Stevedoring Co. Inc., Darrow, Louisiana; \
Seward Terminal Inc., Seward, Alaska; Los Angeles Export Terminal, \
Inc., Los Angeles, California; Levin-Richmond Terminal Corp., \
Richmond, California; Baltimore Terminal, Baltimore, Maryland; \
Norfolk Southern Lamberts Point P-6, Norfolk, Virginia; \
Chesapeake Bay Piers, Baltimore, Maryland; Pier IX Terminal Company, \
Newport News, Virginia; Electro-Coal Transport Corp., Davant, \
Louisiana',
'WT': 'Water: Shipments of fuel moved to consumers by other waterways.',
'TR': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'tr': 'Truck: Shipments of fuel moved to consumers by truck. \
Not included is fuel hauled to or away from a railroad siding by \
truck on non-public roads.',
'TC': 'Tramway/Conveyor: Shipments of fuel moved to consumers \
by tramway or conveyor.',
'SP': 'Slurry Pipeline: Shipments of coal moved to consumers \
by slurry pipeline.',
'PL': 'Pipeline: Shipments of fuel moved to consumers by pipeline'
}
"""dict: A dictionary mapping primary and secondary transportation mode codes
(keys) to descriptions (values).
"""
# we need to include all of the columns which we want to keep for either the
# entity or annual tables. The order here matters. We need to harvest the plant
# location before harvesting the location of the utilities, for example.
entities = {
'plants': [
# base cols
['plant_id_eia'],
# static cols
['balancing_authority_code_eia', 'balancing_authority_name_eia',
'city', 'county', 'ferc_cogen_status',
'ferc_exempt_wholesale_generator', 'ferc_small_power_producer',
'grid_voltage_2_kv', 'grid_voltage_3_kv', 'grid_voltage_kv',
'iso_rto_code', 'latitude', 'longitude', 'service_area',
'plant_name_eia', 'primary_purpose_naics_id',
'sector_id', 'sector_name', 'state', 'street_address', 'zip_code'],
# annual cols
['ash_impoundment', 'ash_impoundment_lined', 'ash_impoundment_status',
'datum', 'energy_storage', 'ferc_cogen_docket_no', 'water_source',
'ferc_exempt_wholesale_generator_docket_no',
'ferc_small_power_producer_docket_no',
'liquefied_natural_gas_storage',
'natural_gas_local_distribution_company', 'natural_gas_storage',
'natural_gas_pipeline_name_1', 'natural_gas_pipeline_name_2',
'natural_gas_pipeline_name_3', 'nerc_region', 'net_metering',
'pipeline_notes', 'regulatory_status_code',
'transmission_distribution_owner_id',
'transmission_distribution_owner_name',
'transmission_distribution_owner_state', 'utility_id_eia'],
# need type fixing
{},
],
'generators': [
# base cols
['plant_id_eia', 'generator_id'],
# static cols
['prime_mover_code', 'duct_burners', 'operating_date',
'topping_bottoming_code', 'solid_fuel_gasification',
'pulverized_coal_tech', 'fluidized_bed_tech', 'subcritical_tech',
'supercritical_tech', 'ultrasupercritical_tech', 'stoker_tech',
'other_combustion_tech', 'bypass_heat_recovery',
'rto_iso_lmp_node_id', 'rto_iso_location_wholesale_reporting_id',
'associated_combined_heat_power', 'original_planned_operating_date',
'operating_switch', 'previously_canceled'],
# annual cols
['capacity_mw', 'fuel_type_code_pudl', 'multiple_fuels',
'ownership_code', 'owned_by_non_utility', 'deliver_power_transgrid',
'summer_capacity_mw', 'winter_capacity_mw', 'summer_capacity_estimate',
'winter_capacity_estimate', 'minimum_load_mw', 'distributed_generation',
'technology_description', 'reactive_power_output_mvar',
'energy_source_code_1', 'energy_source_code_2',
'energy_source_code_3', 'energy_source_code_4',
'energy_source_code_5', 'energy_source_code_6',
'energy_source_1_transport_1', 'energy_source_1_transport_2',
'energy_source_1_transport_3', 'energy_source_2_transport_1',
'energy_source_2_transport_2', 'energy_source_2_transport_3',
'startup_source_code_1', 'startup_source_code_2',
'startup_source_code_3', 'startup_source_code_4',
'time_cold_shutdown_full_load_code', 'syncronized_transmission_grid',
'turbines_num', 'operational_status_code', 'operational_status',
'planned_modifications', 'planned_net_summer_capacity_uprate_mw',
'planned_net_winter_capacity_uprate_mw', 'planned_new_capacity_mw',
'planned_uprate_date', 'planned_net_summer_capacity_derate_mw',
'planned_net_winter_capacity_derate_mw', 'planned_derate_date',
'planned_new_prime_mover_code', 'planned_energy_source_code_1',
'planned_repower_date', 'other_planned_modifications',
'other_modifications_date', 'planned_retirement_date',
'carbon_capture', 'cofire_fuels', 'switch_oil_gas',
'turbines_inverters_hydrokinetics', 'nameplate_power_factor',
'uprate_derate_during_year', 'uprate_derate_completed_date',
'current_planned_operating_date', 'summer_estimated_capability_mw',
'winter_estimated_capability_mw', 'retirement_date',
'utility_id_eia', 'data_source'],
# need type fixing
{}
],
# utilities must come after plants. plant location needs to be
# removed before the utility locations are compiled
'utilities': [
# base cols
['utility_id_eia'],
# static cols
['utility_name_eia'],
# annual cols
['street_address', 'city', 'state', 'zip_code', 'entity_type',
'plants_reported_owner', 'plants_reported_operator',
'plants_reported_asset_manager', 'plants_reported_other_relationship',
'attention_line', 'address_2', 'zip_code_4',
'contact_firstname', 'contact_lastname', 'contact_title',
'contact_firstname_2', 'contact_lastname_2', 'contact_title_2',
'phone_extension_1', 'phone_extension_2', 'phone_number_1',
'phone_number_2'],
# need type fixing
{'utility_id_eia': 'int64', }, ],
'boilers': [
# base cols
['plant_id_eia', 'boiler_id'],
# static cols
['prime_mover_code'],
# annual cols
[],
# need type fixing
{},
]
}
"""dict: A dictionary containing table name strings (keys) and lists of columns
to keep for those tables (values).
"""
epacems_tables = ("hourly_emissions_epacems",)  # trailing comma keeps this a tuple
"""tuple: A tuple containing tables of EPA CEMS data to pull into PUDL.
"""
files_dict_epaipm = {
'transmission_single_epaipm': '*table_3-21*',
'transmission_joint_epaipm': '*transmission_joint_ipm*',
'load_curves_epaipm': '*table_2-2_*',
'plant_region_map_epaipm': '*needs_v6*',
}
"""dict: A dictionary of EPA IPM tables and strings that files of those tables
contain.
"""
epaipm_url_ext = {
'transmission_single_epaipm': 'table_3-21_annual_transmission_capabilities_of_u.s._model_regions_in_epa_platform_v6_-_2021.xlsx',
'load_curves_epaipm': 'table_2-2_load_duration_curves_used_in_epa_platform_v6.xlsx',
'plant_region_map_epaipm': 'needs_v6_november_2018_reference_case_0.xlsx',
}
"""dict: A dictionary of EPA IPM tables and associated URLs extensions for
downloading that table's data.
"""
epaipm_region_names = [
'ERC_PHDL', 'ERC_REST', 'ERC_FRNT', 'ERC_GWAY', 'ERC_WEST',
'FRCC', 'NENG_CT', 'NENGREST', 'NENG_ME', 'MIS_AR', 'MIS_IL',
'MIS_INKY', 'MIS_IA', 'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI',
'MIS_D_MS', 'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA',
'MIS_WUMS', 'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D', 'NY_Z_F',
'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K', 'PJM_West', 'PJM_AP', 'PJM_ATSI',
'PJM_COMD', 'PJM_Dom', 'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC',
'PJM_WMAC', 'S_C_KY', 'S_C_TVA', 'S_D_AECI', 'S_SOU', 'S_VACA',
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE',
'WECC_AZ', 'WEC_BANC', 'WECC_CO', 'WECC_ID', 'WECC_IID',
'WEC_LADW', 'WECC_MT', 'WECC_NM', 'WEC_CALN', 'WECC_NNV',
'WECC_PNW', 'WEC_SDGE', 'WECC_SCE', 'WECC_SNV', 'WECC_UT',
'WECC_WY', 'CN_AB', 'CN_BC', 'CN_NL', 'CN_MB', 'CN_NB', 'CN_NF',
'CN_NS', 'CN_ON', 'CN_PE', 'CN_PQ', 'CN_SK',
]
"""list: A list of EPA IPM region names."""
epaipm_region_aggregations = {
'PJM': [
'PJM_AP', 'PJM_ATSI', 'PJM_COMD', 'PJM_Dom',
'PJM_EMAC', 'PJM_PENE', 'PJM_SMAC', 'PJM_WMAC'
],
'NYISO': [
'NY_Z_A', 'NY_Z_B', 'NY_Z_C&E', 'NY_Z_D',
'NY_Z_F', 'NY_Z_G-I', 'NY_Z_J', 'NY_Z_K'
],
'ISONE': ['NENG_CT', 'NENGREST', 'NENG_ME'],
'MISO': [
'MIS_AR', 'MIS_IL', 'MIS_INKY', 'MIS_IA',
'MIS_MIDA', 'MIS_LA', 'MIS_LMI', 'MIS_MNWI', 'MIS_D_MS',
'MIS_MO', 'MIS_MAPP', 'MIS_AMSO', 'MIS_WOTA', 'MIS_WUMS'
],
'SPP': [
'SPP_NEBR', 'SPP_N', 'SPP_SPS', 'SPP_WEST', 'SPP_KIAM', 'SPP_WAUE'
],
'WECC_NW': [
'WECC_CO', 'WECC_ID', 'WECC_MT', 'WECC_NNV',
'WECC_PNW', 'WECC_UT', 'WECC_WY'
]
}
"""
dict: A dictionary containing aggregated EPA IPM region names (keys) and lists
of the individual EPA IPM regions each aggregate comprises (values).
"""
epaipm_rename_dict = {
'transmission_single_epaipm': {
'From': 'region_from',
'To': 'region_to',
'Capacity TTC (MW)': 'firm_ttc_mw',
'Energy TTC (MW)': 'nonfirm_ttc_mw',
'Transmission Tariff (2016 mills/kWh)': 'tariff_mills_kwh',
},
'load_curves_epaipm': {
'day': 'day_of_year',
'region': 'region_id_epaipm',
},
'plant_region_map_epaipm': {
'ORIS Plant Code': 'plant_id_eia',
'Region Name': 'region',
},
}
"""
dict: A dictionary of dictionaries containing EPA IPM tables (keys) and items
for each table to be renamed along with the replacement name (values).
"""
glue_pudl_tables = ('plants_eia', 'plants_ferc', 'plants', 'utilities_eia',
                    'utilities_ferc', 'utilities', 'utility_plant_assn')
"""tuple: A tuple of the table names which glue the EIA and FERC datasets
together within PUDL.
"""
data_sources = (
'eia860',
'eia861',
'eia923',
'epacems',
'epaipm',
'ferc1',
'ferc714',
# 'pudl'
)
"""tuple: A tuple containing the data sources we are able to pull into PUDL."""
# All the years for which we ought to be able to download these data sources
data_years = {
'eia860': tuple(range(2001, 2020)),
'eia861': tuple(range(1990, 2020)),
'eia923': tuple(range(2001, 2020)),
'epacems': tuple(range(1995, 2021)),
'epaipm': (None, ),
'ferc1': tuple(range(1994, 2020)),
'ferc714': (None, ),
}
"""
dict: A dictionary of data sources (keys) and tuples containing the years
that we expect to be able to download for each data source (values).
"""
# The full set of years we currently expect to be able to ingest, per source:
working_partitions = {
'eia860': {
'years': tuple(range(2004, 2020))
},
'eia860m': {
'year_month': '2020-11'
},
'eia861': {
'years': tuple(range(2001, 2020))
},
'eia923': {
'years': tuple(range(2001, 2020))
},
'epacems': {
'years': tuple(range(1995, 2021)),
'states': tuple(cems_states.keys())},
'ferc1': {
'years': tuple(range(1994, 2020))
},
'ferc714': {},
}
"""
dict: A dictionary of data sources (keys) and dictionaries (values) mapping each
partition type (sub-key) to the partitions (sub-values), such as the tuples of
years for each data source that can currently be ingested into PUDL.
"""
pudl_tables = {
'eia860': eia860_pudl_tables,
'eia861': (
"service_territory_eia861",
"balancing_authority_eia861",
"sales_eia861",
"advanced_metering_infrastructure_eia861",
"demand_response_eia861",
"demand_side_management_eia861",
"distributed_generation_eia861",
"distribution_systems_eia861",
"dynamic_pricing_eia861",
"energy_efficiency_eia861",
"green_pricing_eia861",
"mergers_eia861",
"net_metering_eia861",
"non_net_metering_eia861",
"operational_data_eia861",
"reliability_eia861",
"utility_data_eia861",
),
'eia923': eia923_pudl_tables,
'epacems': epacems_tables,
'epaipm': epaipm_pudl_tables,
'ferc1': ferc1_pudl_tables,
'ferc714': (
"respondent_id_ferc714",
"id_certification_ferc714",
"gen_plants_ba_ferc714",
"demand_monthly_ba_ferc714",
"net_energy_load_ba_ferc714",
"adjacency_ba_ferc714",
"interchange_ba_ferc714",
"lambda_hourly_ba_ferc714",
"lambda_description_ferc714",
"description_pa_ferc714",
"demand_forecast_pa_ferc714",
"demand_hourly_pa_ferc714",
),
'glue': glue_pudl_tables,
}
"""
dict: A dictionary containing data sources (keys) and the list of associated
tables from that datasource that can be pulled into PUDL (values).
"""
base_data_urls = {
'eia860': 'https://www.eia.gov/electricity/data/eia860',
'eia861': 'https://www.eia.gov/electricity/data/eia861/zip',
'eia923': 'https://www.eia.gov/electricity/data/eia923',
'epacems': 'ftp://newftp.epa.gov/dmdnload/emissions/hourly/monthly',
'ferc1': 'ftp://eforms1.ferc.gov/f1allyears',
'ferc714': 'https://www.ferc.gov/docs-filing/forms/form-714/data',
'ferceqr': 'ftp://eqrdownload.ferc.gov/DownloadRepositoryProd/BulkNew/CSV',
'msha': 'https://arlweb.msha.gov/OpenGovernmentData/DataSets',
'epaipm': 'https://www.epa.gov/sites/production/files/2019-03',
'pudl': 'https://catalyst.coop/pudl/'
}
"""
dict: A dictionary containing data sources (keys) and their base data URLs
(values).
"""
need_fix_inting = {
'plants_steam_ferc1': ('construction_year', 'installation_year'),
'plants_small_ferc1': ('construction_year', 'ferc_license_id'),
'plants_hydro_ferc1': ('construction_year', 'installation_year',),
'plants_pumped_storage_ferc1': ('construction_year', 'installation_year',),
'hourly_emissions_epacems': ('facility_id', 'unit_id_epa',),
}
"""
dict: A dictionary containing table names (keys) and tuples of the names of their
integer-type columns whose null values need fixing (values).
"""
contributors = {
"catalyst-cooperative": {
"title": "Catalyst Cooperative",
"path": "https://catalyst.coop/",
"role": "publisher",
"email": "<EMAIL>",
"organization": "Catalyst Cooperative",
},
"zane-selvans": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://amateurearthling.org/",
"role": "wrangler",
"organization": "Catalyst Cooperative"
},
"christina-gosnell": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"steven-winter": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"alana-wilson": {
"title": "<NAME>",
"email": "<EMAIL>",
"role": "contributor",
"organization": "Catalyst Cooperative",
},
"karl-dunkle-werner": {
"title": "<NAME>",
"email": "<EMAIL>",
"path": "https://karldw.org/",
"role": "contributor",
"organization": "UC Berkeley",
},
'greg-schivley': {
"title": "<NAME>",
"role": "contributor",
},
}
"""
dict: A dictionary of dictionaries containing organization names (keys) and
their attributes (values).
"""
data_source_info = {
"eia860": {
"title": "EIA Form 860",
"path": "https://www.eia.gov/electricity/data/eia860/",
},
"eia861": {
"title": "EIA Form 861",
"path": "https://www.eia.gov/electricity/data/eia861/",
},
"eia923": {
"title": "EIA Form 923",
"path": "https://www.eia.gov/electricity/data/eia923/",
},
"eiawater": {
"title": "EIA Water Use for Power",
"path": "https://www.eia.gov/electricity/data/water/",
},
"epacems": {
"title": "EPA Air Markets Program Data",
"path": "https://ampd.epa.gov/ampd/",
},
"epaipm": {
"title": "EPA Integrated Planning Model",
"path": "https://www.epa.gov/airmarkets/national-electric-energy-data-system-needs-v6",
},
"ferc1": {
"title": "FERC Form 1",
"path": "https://www.ferc.gov/docs-filing/forms/form-1/data.asp",
},
"ferc714": {
"title": "FERC Form 714",
"path": "https://www.ferc.gov/docs-filing/forms/form-714/data.asp",
},
"ferceqr": {
"title": "FERC Electric Quarterly Report",
"path": "https://www.ferc.gov/docs-filing/eqr.asp",
},
"msha": {
"title": "Mining Safety and Health Administration",
"path": "https://www.msha.gov/mine-data-retrieval-system",
},
"phmsa": {
"title": "Pipelines and Hazardous Materials Safety Administration",
"path": "https://www.phmsa.dot.gov/data-and-statistics/pipeline/data-and-statistics-overview",
},
"pudl": {
"title": "The Public Utility Data Liberation Project (PUDL)",
"path": "https://catalyst.coop/pudl/",
"email": "<EMAIL>",
},
}
"""
dict: A dictionary of dictionaries containing data sources (keys) and their
associated attributes (values).
"""
contributors_by_source = {
"pudl": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
"karl-dunkle-werner",
],
"eia923": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
],
"eia860": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"ferc1": [
"catalyst-cooperative",
"zane-selvans",
"christina-gosnell",
"steven-winter",
"alana-wilson",
],
"epacems": [
"catalyst-cooperative",
"karl-dunkle-werner",
"zane-selvans",
],
"epaipm": [
"greg-schivley",
],
}
"""
dict: A dictionary of data sources (keys) and lists of contributors (values).
"""
licenses = {
"cc-by-4.0": {
"name": "CC-BY-4.0",
"title": "Creative Commons Attribution 4.0",
"path": "https://creativecommons.org/licenses/by/4.0/"
},
"us-govt": {
"name": "other-pd",
"title": "U.S. Government Work",
"path": "http://www.usa.gov/publicdomain/label/1.0/",
}
}
"""
dict: A dictionary of dictionaries containing license types and their
attributes.
"""
output_formats = [
'sqlite',
'parquet',
'datapkg',
]
"""list: A list of types of PUDL output formats."""
keywords_by_data_source = {
'pudl': [
'us', 'electricity',
],
'eia860': [
'electricity', 'electric', 'boiler', 'generator', 'plant', 'utility',
'fuel', 'coal', 'natural gas', 'prime mover', 'eia860', 'retirement',
'capacity', 'planned', 'proposed', 'energy', 'hydro', 'solar', 'wind',
'nuclear', 'form 860', 'eia', 'annual', 'gas', 'ownership', 'steam',
'turbine', 'combustion', 'combined cycle', 'eia',
'energy information administration'
],
'eia923': [
'fuel', 'boiler', 'generator', 'plant', 'utility', 'cost', 'price',
'natural gas', 'coal', 'eia923', 'energy', 'electricity', 'form 923',
'receipts', 'generation', 'net generation', 'monthly', 'annual', 'gas',
'fuel consumption', 'MWh', 'energy information administration', 'eia',
'mercury', 'sulfur', 'ash', 'lignite', 'bituminous', 'subbituminous',
'heat content'
],
'epacems': [
'epa', 'us', 'emissions', 'pollution', 'ghg', 'so2', 'co2', 'sox',
'nox', 'load', 'utility', 'electricity', 'plant', 'generator', 'unit',
'generation', 'capacity', 'output', 'power', 'heat content', 'mmbtu',
        'steam', 'cems', 'continuous emissions monitoring system', 'hourly',
'environmental protection agency', 'ampd', 'air markets program data',
],
'ferc1': [
'electricity', 'electric', 'utility', 'plant', 'steam', 'generation',
'cost', 'expense', 'price', 'heat content', 'ferc', 'form 1',
'federal energy regulatory commission', 'capital', 'accounting',
'depreciation', 'finance', 'plant in service', 'hydro', 'coal',
'natural gas', 'gas', 'opex', 'capex', 'accounts', 'investment',
'capacity'
],
'ferc714': [
'electricity', 'electric', 'utility', 'planning area', 'form 714',
'balancing authority', 'demand', 'system lambda', 'ferc',
'federal energy regulatory commission', "hourly", "generation",
"interchange", "forecast", "load", "adjacency", "plants",
],
'epaipm': [
'epaipm', 'integrated planning',
]
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
ENTITY_TYPE_DICT = {
'M': 'Municipal',
'C': 'Cooperative',
'R': 'Retail Power Marketer',
'I': 'Investor Owned',
'P': 'Political Subdivision',
'T': 'Transmission',
'S': 'State',
'W': 'Wholesale Power Marketer',
'F': 'Federal',
'A': 'Municipal Mktg Authority',
'G': 'Community Choice Aggregator',
'D': 'Nonutility DSM Administrator',
'B': 'Behind the Meter',
'Q': 'Independent Power Producer',
'IND': 'Industrial',
'COM': 'Commercial',
'PR': 'Private', # Added by AES for OD table (Arbitrary moniker)
'PO': 'Power Marketer', # Added by AES for OD table
'U': 'Unknown', # Added by AES for OD table
'O': 'Other' # Added by AES for OD table
}
# Confirm these designations -- educated guess based on the form instructions
MOMENTARY_INTERRUPTION_DEF = { # Added by AES for R table
'L': 'Less than 1 minute',
'F': 'Less than or equal to 5 minutes',
'O': 'Other',
}
# https://www.eia.gov/electricity/data/eia411/#tabs_NERC-3
RECOGNIZED_NERC_REGIONS = [
'BASN', # ASSESSMENT AREA Basin (WECC)
'CALN', # ASSESSMENT AREA California (WECC)
'CALS', # ASSESSMENT AREA California (WECC)
'DSW', # ASSESSMENT AREA Desert Southwest (WECC)
'ASCC', # Alaska
'ISONE', # ISO New England (NPCC)
'ERCOT', # lumped under TRE in 2017 Form instructions
'NORW', # ASSESSMENT AREA Northwest (WECC)
'NYISO', # ISO (NPCC)
'PJM', # RTO
'ROCK', # ASSESSMENT AREA Rockies (WECC)
'ECAR', # OLD RE Now part of RFC and SERC
'FRCC', # included in 2017 Form instructions, recently joined with SERC
'HICC', # Hawaii
'MAAC', # OLD RE Now part of RFC
'MAIN', # OLD RE Now part of SERC, RFC, MRO
'MAPP', # OLD/NEW RE Became part of MRO, resurfaced in 2010
'MRO', # RE included in 2017 Form instructions
'NPCC', # RE included in 2017 Form instructions
'RFC', # RE included in 2017 Form instructions
'SERC', # RE included in 2017 Form instructions
'SPP', # RE included in 2017 Form instructions
'TRE', # RE included in 2017 Form instructions (included ERCOT)
'WECC', # RE included in 2017 Form instructions
'WSCC', # OLD RE pre-2002 version of WECC
'MISO', # ISO unclear whether technically a regional entity, but lots of entries
'ECAR_MAAC',
'MAPP_WECC',
'RFC_SERC',
'SPP_WECC',
'MRO_WECC',
'ERCOT_SPP',
'SPP_TRE',
'ERCOT_TRE',
'MISO_TRE',
'VI', # Virgin Islands
'GU', # Guam
'PR', # Puerto Rico
'AS', # American Samoa
'UNK',
]
CUSTOMER_CLASSES = [
"commercial",
"industrial",
"direct_connection",
"other",
"residential",
"total",
"transportation"
]
TECH_CLASSES = [
    'backup',  # TODO: confirm whether this is still used; removed from the DG table because it is not a real component
'chp_cogen',
'combustion_turbine',
'fuel_cell',
'hydro',
'internal_combustion',
'other',
'pv',
'steam',
'storage_pv',
'all_storage', # need 'all' as prefix so as not to confuse with other storage category
'total',
'virtual_pv',
'wind',
]
REVENUE_CLASSES = [
'retail_sales',
'unbundled',
'delivery_customers',
'sales_for_resale',
'credits_or_adjustments',
'other',
'transmission',
'total',
]
RELIABILITY_STANDARDS = [
'ieee_standard',
'other_standard'
]
FUEL_CLASSES = [
'gas',
'oil',
'other',
'renewable',
'water',
'wind',
'wood',
]
RTO_CLASSES = [
'caiso',
'ercot',
'pjm',
'nyiso',
'spp',
'miso',
'isone',
'other'
]
ESTIMATED_OR_ACTUAL = {'E': 'estimated', 'A': 'actual'}
TRANSIT_TYPE_DICT = {
'CV': 'conveyer',
'PL': 'pipeline',
'RR': 'railroad',
'TK': 'truck',
'WA': 'water',
'UN': 'unknown',
}
"""dict: A dictionary of datasets (keys) and keywords (values). """
column_dtypes = {
"ferc1": { # Obviously this is not yet a complete list...
"construction_year": pd.Int64Dtype(),
"installation_year": pd.Int64Dtype(),
"plant_id_ferc1": pd.Int64Dtype(),
"plant_id_pudl": pd.Int64Dtype(),
"report_date": "datetime64[ns]",
"report_year": pd.Int64Dtype(),
"utility_id_ferc1": pd.Int64Dtype(),
"utility_id_pudl": pd.Int64Dtype(),
},
"ferc714": { # INCOMPLETE
"demand_mwh": float,
"demand_annual_mwh": float,
"eia_code": pd.Int64Dtype(),
"peak_demand_summer_mw": float,
"peak_demand_winter_mw": float,
"report_date": "datetime64[ns]",
"respondent_id_ferc714": pd.Int64Dtype(),
"respondent_name_ferc714": pd.StringDtype(),
"respondent_type": pd.CategoricalDtype(categories=[
"utility", "balancing_authority",
]),
"timezone": pd.CategoricalDtype(categories=[
"America/New_York", "America/Chicago", "America/Denver",
"America/Los_Angeles", "America/Anchorage", "Pacific/Honolulu"]),
"utc_datetime": "datetime64[ns]",
},
"epacems": {
'state': pd.StringDtype(),
'plant_id_eia': pd.Int64Dtype(), # Nullable Integer
'unitid': pd.StringDtype(),
'operating_datetime_utc': "datetime64[ns]",
'operating_time_hours': float,
'gross_load_mw': float,
'steam_load_1000_lbs': float,
'so2_mass_lbs': float,
'so2_mass_measurement_code': pd.StringDtype(),
'nox_rate_lbs_mmbtu': float,
'nox_rate_measurement_code': pd.StringDtype(),
'nox_mass_lbs': float,
'nox_mass_measurement_code': pd.StringDtype(),
'co2_mass_tons': float,
'co2_mass_measurement_code': pd.StringDtype(),
'heat_content_mmbtu': float,
'facility_id': pd.Int64Dtype(), # Nullable Integer
'unit_id_epa': pd.Int64Dtype(), # Nullable Integer
},
"eia": {
'actual_peak_demand_savings_mw': float, # Added by AES for DR table
'address_2': pd.StringDtype(), # Added by AES for 860 utilities table
'advanced_metering_infrastructure': pd.Int64Dtype(), # Added by AES for AMI table
# Added by AES for UD misc table
'alternative_fuel_vehicle_2_activity': pd.BooleanDtype(),
'alternative_fuel_vehicle_activity': pd.BooleanDtype(),
'annual_indirect_program_cost': float,
'annual_total_cost': float,
'ash_content_pct': float,
'ash_impoundment': pd.BooleanDtype(),
'ash_impoundment_lined': pd.BooleanDtype(),
# TODO: convert this field to more descriptive words
'ash_impoundment_status': pd.StringDtype(),
'associated_combined_heat_power': pd.BooleanDtype(),
'attention_line': pd.StringDtype(),
'automated_meter_reading': pd.Int64Dtype(), # Added by AES for AMI table
'backup_capacity_mw': float, # Added by AES for NNM & DG misc table
'balancing_authority_code_eia': pd.CategoricalDtype(),
'balancing_authority_id_eia': pd.Int64Dtype(),
'balancing_authority_name_eia': pd.StringDtype(),
'bga_source': pd.StringDtype(),
'boiler_id': pd.StringDtype(),
'bunded_activity': pd.BooleanDtype(),
'business_model': pd.CategoricalDtype(categories=[
"retail", "energy_services"]),
'buy_distribution_activity': pd.BooleanDtype(),
'buying_transmission_activity': pd.BooleanDtype(),
'bypass_heat_recovery': pd.BooleanDtype(),
'caidi_w_major_event_days_minus_loss_of_service_minutes': float,
'caidi_w_major_event_dats_minutes': float,
'caidi_wo_major_event_days_minutes': float,
'capacity_mw': float,
'carbon_capture': pd.BooleanDtype(),
'chlorine_content_ppm': float,
'circuits_with_voltage_optimization': pd.Int64Dtype(),
'city': pd.StringDtype(),
'cofire_fuels': pd.BooleanDtype(),
'consumed_by_facility_mwh': float,
'consumed_by_respondent_without_charge_mwh': float,
'contact_firstname': pd.StringDtype(),
'contact_firstname_2': pd.StringDtype(),
'contact_lastname': pd.StringDtype(),
'contact_lastname_2': pd.StringDtype(),
'contact_title': pd.StringDtype(),
'contact_title_2': pd.StringDtype(),
'contract_expiration_date': 'datetime64[ns]',
'contract_type_code': pd.StringDtype(),
'county': pd.StringDtype(),
'county_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'credits_or_adjustments': float,
'critical_peak_pricing': pd.BooleanDtype(),
'critical_peak_rebate': pd.BooleanDtype(),
'current_planned_operating_date': 'datetime64[ns]',
'customers': float,
'customer_class': pd.CategoricalDtype(categories=CUSTOMER_CLASSES),
'customer_incentives_cost': float,
'customer_incentives_incremental_cost': float,
'customer_incentives_incremental_life_cycle_cost': float,
'customer_other_costs_incremental_life_cycle_cost': float,
'daily_digital_access_customers': pd.Int64Dtype(),
'data_observed': pd.BooleanDtype(),
'datum': pd.StringDtype(),
'deliver_power_transgrid': pd.BooleanDtype(),
'delivery_customers': float,
'direct_load_control_customers': pd.Int64Dtype(),
'distributed_generation': pd.BooleanDtype(),
'distributed_generation_owned_capacity_mw': float,
'distribution_activity': pd.BooleanDtype(),
'distribution_circuits': pd.Int64Dtype(),
'duct_burners': pd.BooleanDtype(),
'energy_displaced_mwh': float,
'energy_efficiency_annual_cost': float,
'energy_efficiency_annual_actual_peak_reduction_mw': float,
'energy_efficiency_annual_effects_mwh': float,
'energy_efficiency_annual_incentive_payment': float,
'energy_efficiency_incremental_actual_peak_reduction_mw': float,
'energy_efficiency_incremental_effects_mwh': float,
'energy_savings_estimates_independently_verified': pd.BooleanDtype(),
'energy_savings_independently_verified': pd.BooleanDtype(),
'energy_savings_mwh': float,
'energy_served_ami_mwh': float,
'energy_source_1_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_1_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_1': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_2': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_2_transport_3': pd.CategoricalDtype(categories=TRANSIT_TYPE_DICT.values()),
'energy_source_code': pd.StringDtype(),
'energy_source_code_1': pd.StringDtype(),
'energy_source_code_2': pd.StringDtype(),
'energy_source_code_3': pd.StringDtype(),
'energy_source_code_4': pd.StringDtype(),
'energy_source_code_5': pd.StringDtype(),
'energy_source_code_6': pd.StringDtype(),
'energy_storage': pd.BooleanDtype(),
'entity_type': pd.CategoricalDtype(categories=ENTITY_TYPE_DICT.values()),
'estimated_or_actual_capacity_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_fuel_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'estimated_or_actual_tech_data': pd.CategoricalDtype(categories=ESTIMATED_OR_ACTUAL.values()),
'exchange_energy_delivered_mwh': float,
'exchange_energy_recieved_mwh': float,
'ferc_cogen_docket_no': pd.StringDtype(),
'ferc_cogen_status': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator': pd.BooleanDtype(),
'ferc_exempt_wholesale_generator_docket_no': pd.StringDtype(),
'ferc_small_power_producer': pd.BooleanDtype(),
'ferc_small_power_producer_docket_no': pd.StringDtype(),
'fluidized_bed_tech': pd.BooleanDtype(),
'fraction_owned': float,
'fuel_class': pd.StringDtype(),
'fuel_consumed_for_electricity_mmbtu': float,
'fuel_consumed_for_electricity_units': float,
'fuel_consumed_mmbtu': float,
'fuel_consumed_units': float,
'fuel_cost_per_mmbtu': float,
'fuel_group_code': pd.StringDtype(),
'fuel_group_code_simple': pd.StringDtype(),
'fuel_mmbtu_per_unit': float,
'fuel_pct': float,
'fuel_qty_units': float,
# are fuel_type and fuel_type_code the same??
# fuel_type includes 40 code-like things.. WAT, SUN, NUC, etc.
'fuel_type': pd.StringDtype(),
# from the boiler_fuel_eia923 table, there are 30 code-like things, like NG, BIT, LIG
'fuel_type_code': pd.StringDtype(),
'fuel_type_code_aer': pd.StringDtype(),
'fuel_type_code_pudl': pd.StringDtype(),
'furnished_without_charge_mwh': float,
'generation_activity': pd.BooleanDtype(),
# this is a mix of integer-like values (2 or 5) and strings like AUGSF
'generator_id': pd.StringDtype(),
'generators_number': float,
'generators_num_less_1_mw': float,
'green_pricing_revenue': float,
'grid_voltage_2_kv': float,
'grid_voltage_3_kv': float,
'grid_voltage_kv': float,
'heat_content_mmbtu_per_unit': float,
'highest_distribution_voltage_kv': float,
'home_area_network': pd.Int64Dtype(),
'inactive_accounts_included': pd.BooleanDtype(),
'incremental_energy_savings_mwh': float,
'incremental_life_cycle_energy_savings_mwh': float,
'incremental_life_cycle_peak_reduction_mwh': float,
'incremental_peak_reduction_mw': float,
'iso_rto_code': pd.StringDtype(),
'latitude': float,
'liquefied_natural_gas_storage': pd.BooleanDtype(),
'load_management_annual_cost': float,
'load_management_annual_actual_peak_reduction_mw': float,
'load_management_annual_effects_mwh': float,
'load_management_annual_incentive_payment': float,
'load_management_annual_potential_peak_reduction_mw': float,
'load_management_incremental_actual_peak_reduction_mw': float,
'load_management_incremental_effects_mwh': float,
'load_management_incremental_potential_peak_reduction_mw': float,
'longitude': float,
'major_program_changes': pd.BooleanDtype(),
'mercury_content_ppm': float,
'merge_address': pd.StringDtype(),
'merge_city': pd.StringDtype(),
'merge_company': pd.StringDtype(),
'merge_date': 'datetime64[ns]',
'merge_state': pd.StringDtype(),
'mine_id_msha': pd.Int64Dtype(),
'mine_id_pudl': pd.Int64Dtype(),
'mine_name': pd.StringDtype(),
'mine_type_code': pd.StringDtype(),
'minimum_load_mw': float,
'moisture_content_pct': float,
'momentary_interruption_definition': pd.CategoricalDtype(categories=MOMENTARY_INTERRUPTION_DEF.values()),
'multiple_fuels': pd.BooleanDtype(),
'nameplate_power_factor': float,
'natural_gas_delivery_contract_type_code': pd.StringDtype(),
'natural_gas_local_distribution_company': pd.StringDtype(),
'natural_gas_pipeline_name_1': pd.StringDtype(),
'natural_gas_pipeline_name_2': pd.StringDtype(),
'natural_gas_pipeline_name_3': pd.StringDtype(),
'natural_gas_storage': pd.BooleanDtype(),
'natural_gas_transport_code': pd.StringDtype(),
'nerc_region': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'nerc_regions_of_operation': pd.CategoricalDtype(categories=RECOGNIZED_NERC_REGIONS),
'net_generation_mwh': float,
'net_metering': pd.BooleanDtype(),
'net_power_exchanged_mwh': float,
'net_wheeled_power_mwh': float,
'new_parent': pd.StringDtype(),
'non_amr_ami': pd.Int64Dtype(),
'nuclear_unit_id': pd.Int64Dtype(),
'operates_generating_plant': pd.BooleanDtype(),
'operating_date': 'datetime64[ns]',
'operating_switch': pd.StringDtype(),
# TODO: double check this for early 860 years
'operational_status': pd.StringDtype(),
'operational_status_code': pd.StringDtype(),
'original_planned_operating_date': 'datetime64[ns]',
'other': float,
'other_combustion_tech': pd.BooleanDtype(),
'other_costs': float,
'other_costs_incremental_cost': float,
'other_modifications_date': 'datetime64[ns]',
'other_planned_modifications': pd.BooleanDtype(),
'outages_recorded_automatically': pd.BooleanDtype(),
'owned_by_non_utility': pd.BooleanDtype(),
'owner_city': pd.StringDtype(),
'owner_name': pd.StringDtype(),
'owner_state': pd.StringDtype(),
'owner_street_address': pd.StringDtype(),
'owner_utility_id_eia': pd.Int64Dtype(),
'owner_zip_code': pd.StringDtype(),
# we should transition these into readable codes, not a one letter thing
'ownership_code': pd.StringDtype(),
'phone_extension_1': pd.StringDtype(),
'phone_extension_2': pd.StringDtype(),
'phone_number_1': pd.StringDtype(),
'phone_number_2': pd.StringDtype(),
'pipeline_notes': pd.StringDtype(),
'planned_derate_date': 'datetime64[ns]',
'planned_energy_source_code_1': pd.StringDtype(),
'planned_modifications': pd.BooleanDtype(),
'planned_net_summer_capacity_derate_mw': float,
'planned_net_summer_capacity_uprate_mw': float,
'planned_net_winter_capacity_derate_mw': float,
'planned_net_winter_capacity_uprate_mw': float,
'planned_new_capacity_mw': float,
'planned_new_prime_mover_code': pd.StringDtype(),
'planned_repower_date': 'datetime64[ns]',
'planned_retirement_date': 'datetime64[ns]',
'planned_uprate_date': 'datetime64[ns]',
'plant_id_eia': pd.Int64Dtype(),
'plant_id_epa': pd.Int64Dtype(),
'plant_id_pudl': pd.Int64Dtype(),
'plant_name_eia': pd.StringDtype(),
'plants_reported_asset_manager': pd.BooleanDtype(),
'plants_reported_operator': pd.BooleanDtype(),
'plants_reported_other_relationship': pd.BooleanDtype(),
'plants_reported_owner': pd.BooleanDtype(),
'point_source_unit_id_epa': pd.StringDtype(),
'potential_peak_demand_savings_mw': float,
'pulverized_coal_tech': pd.BooleanDtype(),
'previously_canceled': pd.BooleanDtype(),
'price_responsive_programes': pd.BooleanDtype(),
'price_responsiveness_customers': pd.Int64Dtype(),
'primary_transportation_mode_code': pd.StringDtype(),
'primary_purpose_naics_id': pd.Int64Dtype(),
'prime_mover_code': pd.StringDtype(),
'pv_current_flow_type': pd.CategoricalDtype(categories=['AC', 'DC']),
'reactive_power_output_mvar': float,
'real_time_pricing_program': pd.BooleanDtype(),
'rec_revenue': float,
'rec_sales_mwh': float,
'regulatory_status_code': pd.StringDtype(),
'report_date': 'datetime64[ns]',
'reported_as_another_company': pd.StringDtype(),
'retail_marketing_activity': pd.BooleanDtype(),
'retail_sales': float,
'retail_sales_mwh': float,
'retirement_date': 'datetime64[ns]',
'revenue_class': pd.CategoricalDtype(categories=REVENUE_CLASSES),
'rto_iso_lmp_node_id': pd.StringDtype(),
'rto_iso_location_wholesale_reporting_id': pd.StringDtype(),
'rtos_of_operation': pd.StringDtype(),
'saidi_w_major_event_dats_minus_loss_of_service_minutes': float,
'saidi_w_major_event_days_minutes': float,
'saidi_wo_major_event_days_minutes': float,
'saifi_w_major_event_days_customers': float,
'saifi_w_major_event_days_minus_loss_of_service_customers': float,
'saifi_wo_major_event_days_customers': float,
'sales_for_resale': float,
'sales_for_resale_mwh': float,
'sales_mwh': float,
'sales_revenue': float,
'sales_to_ultimate_consumers_mwh': float,
'secondary_transportation_mode_code': pd.StringDtype(),
'sector_id': pd.Int64Dtype(),
'sector_name': pd.StringDtype(),
'service_area': pd.StringDtype(),
'service_type': pd.CategoricalDtype(categories=[
"bundled", "energy", "delivery",
]),
'short_form': pd.BooleanDtype(),
'sold_to_utility_mwh': float,
'solid_fuel_gasification': pd.BooleanDtype(),
'data_source': pd.StringDtype(),
'standard': pd.CategoricalDtype(categories=RELIABILITY_STANDARDS),
'startup_source_code_1': pd.StringDtype(),
'startup_source_code_2': pd.StringDtype(),
'startup_source_code_3': pd.StringDtype(),
'startup_source_code_4': pd.StringDtype(),
'state': pd.StringDtype(),
'state_id_fips': pd.StringDtype(), # Must preserve leading zeroes
'street_address': pd.StringDtype(),
'stoker_tech': pd.BooleanDtype(),
'storage_capacity_mw': float,
'storage_customers': pd.Int64Dtype(),
'subcritical_tech': pd.BooleanDtype(),
'sulfur_content_pct': float,
'summer_capacity_mw': float,
'summer_capacity_estimate': pd.BooleanDtype(),
# TODO: check if there is any data pre-2016
'summer_estimated_capability_mw': float,
'summer_peak_demand_mw': float,
'supercritical_tech': pd.BooleanDtype(),
'supplier_name': pd.StringDtype(),
'switch_oil_gas': pd.BooleanDtype(),
'syncronized_transmission_grid': pd.BooleanDtype(),
# Added by AES for NM & DG tech table (might want to consider merging with another fuel label)
'tech_class': pd.CategoricalDtype(categories=TECH_CLASSES),
'technology_description': pd.StringDtype(),
'time_cold_shutdown_full_load_code': pd.StringDtype(),
'time_of_use_pricing_program': pd.BooleanDtype(),
'time_responsive_programs': pd.BooleanDtype(),
'time_responsiveness_customers': pd.Int64Dtype(),
'timezone': pd.StringDtype(),
'topping_bottoming_code': pd.StringDtype(),
'total': float,
'total_capacity_less_1_mw': float,
'total_meters': pd.Int64Dtype(),
'total_disposition_mwh': float,
'total_energy_losses_mwh': float,
'total_sources_mwh': float,
'transmission': float,
'transmission_activity': pd.BooleanDtype(),
'transmission_by_other_losses_mwh': float,
'transmission_distribution_owner_id': pd.Int64Dtype(),
'transmission_distribution_owner_name': pd.StringDtype(),
'transmission_distribution_owner_state': pd.StringDtype(),
'turbines_inverters_hydrokinetics': float,
'turbines_num': pd.Int64Dtype(), # TODO: check if any turbines show up pre-2016
'ultrasupercritical_tech': pd.BooleanDtype(),
'unbundled_revenues': float,
'unit_id_eia': pd.StringDtype(),
'unit_id_pudl': pd.Int64Dtype(),
'uprate_derate_completed_date': 'datetime64[ns]',
'uprate_derate_during_year': pd.BooleanDtype(),
'utility_id_eia': pd.Int64Dtype(),
'utility_id_pudl': pd.Int64Dtype(),
'utility_name_eia': pd.StringDtype(),
'utility_owned_capacity_mw': float, # Added by AES for NNM table
'variable_peak_pricing_program': pd.BooleanDtype(), # Added by AES for DP table
'virtual_capacity_mw': float, # Added by AES for NM table
'virtual_customers': | pd.Int64Dtype() | pandas.Int64Dtype |
# -*- coding: utf-8 -*-
import pandas as pd
import torch
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from deepctr_torch.inputs import SparseFeat, DenseFeat, get_feature_names
from deepctr_torch.models import *
from torch import nn
import numpy as np
import argparse
import yaml
import json
import pdb
import sys
import shutil
from torch.utils.tensorboard import SummaryWriter
def count_parameters(model):
total_para = 0
strr = "PARAMETERS:\n"
for name,parameters in model.named_parameters():
strr += name + ':' + str(parameters.size()) + "\n"
total_para += parameters.numel()
strr += "Total:" + str(total_para) + "\n"
return strr
def load_data_in_df(args, config):
sparse_features = ['C' + str(i) for i in range(1, 27)]
dense_features = ['I' + str(i) for i in range(1, 14)]
target = ['label']
if args.test_run:
print("SAMPLE RUN...")
df = pd.read_csv('/home/apd10/DeepCTR-Torch/examples/criteo_sample.txt')
#df = pd.read_csv('/home/apd10/dlrm/dlrm/input/small_data.csv')
df[sparse_features] = df[sparse_features].fillna('-1', )
df[dense_features] = df[dense_features].fillna(0, )
        # 1. Label-encode the sparse features and apply a simple min-max transformation to the dense features
for feat in sparse_features:
lbe = LabelEncoder()
df[feat] = lbe.fit_transform(df[feat])
mms = MinMaxScaler(feature_range=(0, 1))
df[dense_features] = mms.fit_transform(df[dense_features])
else:
handle = np.load("/home/apd10/dlrm/dlrm/input/data.npz")
if "data" in config:
if config["data"]["type"] == "le2":
print("Using le2 data")
handle = np.load("/home/apd10/dlrm/dlrm/input/data.le2.npz")
intdf = pd.DataFrame(handle['intF'], columns = dense_features)
catdf = pd.DataFrame(handle['catF'], columns = sparse_features)
labeldf = pd.DataFrame(handle['label'], columns = target)
df = | pd.concat([labeldf, intdf, catdf], axis=1) | pandas.concat |
#!/usr/bin/env python3
#
# C19Web.py
#
# C19Web is a web application written in Python and using
# Streamlit as the presentation method and Streamlit Share
# make it generally available.
#
# The structure of this program has all the Streamlit code
# in the main program because of Streamlit requirements.
#
import datetime
from datetime import timedelta
#from datetime import datetime
from dateutil.relativedelta import relativedelta
from dateutil import parser
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
import streamlit as st
import urllib
from streamlit.elements.doc_string import CONFUSING_STREAMLIT_MODULES
st.set_page_config(
page_title="B.C. Covid-19",
page_icon="😷",
layout="centered",
initial_sidebar_state="auto",
)
# 'country', 'province', 'lastDate'
# 'latitude', 'longitude'
# 'dates'
# 'confirmed', 'confirmedNew', 'confirmedNewMean'
# 'deaths', 'deathsNew', 'deathsNewMean'
index_url_csv = 'https://jpaulhart.github.io/Index.csv'
# "Date", "Region", "New_Tests", "Total_Tests", "Positivity", "Turn_Around"
bc_tests_url = 'http://www.bccdc.ca/Health-Info-Site/Documents/BCCDC_COVID19_Dashboard_Lab_Information.csv'
# "Reported_Date","HA","Sex","Age_Group","Classification_Reported"
bc_cases_url = 'http://www.bccdc.ca/Health-Info-Site/Documents/BCCDC_COVID19_Dashboard_Case_Details.csv'
# "Date","Province","HA","HSDA","Cases_Reported","Cases_Reported_Smoothed"
bc_regional_url = 'http://www.bccdc.ca/Health-Info-Site/Documents/BCCDC_COVID19_Regional_Summary_Data.csv'
base_url = 'https://jpaulhart.github.io'
# #######################################################################################
# Read data and cache
# #######################################################################################
@st.cache
def read_csv(url):
fixed_url = url.replace(' ', '%20')
return | pd.read_csv(fixed_url) | pandas.read_csv |
import abc
import pandas as pd
import numpy as np
from numbers import Number
from itertools import combinations
from statsmodels.formula.api import ols
from functools import partial
from .utils import NumericFunction
from .assignment import get_assignments_as_positions
def identity(x): return x
def max_absolute_value(x): return np.max(np.abs(x))
min_across_covariates = partial(np.min, axis=1)
max_across_covariates = partial(np.max, axis=1)
mean_across_covariates = partial(np.mean, axis=1)
class BalanceObjective:
def __init__(self, cols=None):
self._cols = cols
def col_selection(self, df):
return self._cols or df.columns
@property
def balance_func(self):
return NumericFunction.numerize(self._balance_func)
@abc.abstractmethod
def _balance_func(self, df, assignments):
""""""
@classmethod
def assignment_indices(cls, df, assignments):
idxs = cls._idxs_from_assignment(df, assignments)
return cls._append_complementary_assignment(idxs)
@classmethod
def _idxs_from_assignment(cls, df, assignments):
if len(assignments[0]) == len(df.index):
return assignments
else:
return [df.index.isin(a) for a in assignments]
@classmethod
def _append_complementary_assignment(cls, idxs):
total_assignments = np.add(*idxs) if len(idxs) > 1 else idxs[0]
if not min(total_assignments):
idxs.append(np.logical_not(total_assignments))
return idxs
class MahalanobisBalance(BalanceObjective):
def __init__(self, treatment_aggregator=identity, cols=None):
self.treatment_aggregator = treatment_aggregator
super().__init__(cols)
def _balance_func(self, df, assignments):
df_sel = df[self.col_selection(df)]
inverse_cov = np.linalg.inv(df_sel.cov())
means = [df_sel.loc[idx].mean() for idx in
self.assignment_indices(df_sel, assignments)]
combs = list(combinations(range(len(means)), 2))
mean_diffs = [means[a] - means[b] for a, b in combs]
res = pd.DataFrame(data=[mean_diff @ inverse_cov @ mean_diff
for mean_diff in mean_diffs],
index=['{}-{}'.format(a, b) for a, b in combs])
return -self.treatment_aggregator(res)
def mahalanobis_balance(cols=None):
return MahalanobisBalance(np.max, cols=cols).balance_func
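# Editor's sketch with made-up data (not part of the original module): the
# callable returned by ``mahalanobis_balance`` takes a covariate DataFrame and a
# list of assignments (index labels or boolean masks) and returns the negated
# worst-case Mahalanobis distance between group means, so values closer to zero
# indicate better balance.
def _mahalanobis_balance_example():
    rng = np.random.default_rng(0)
    df = pd.DataFrame(rng.normal(size=(20, 2)), columns=['age', 'income'])
    assignments = [list(range(0, 10)), list(range(10, 20))]
    return mahalanobis_balance(['age', 'income'])(df, assignments)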
class PValueBalance(BalanceObjective):
def __init__(self, treatment_aggreagtor=identity,
covariate_aggregator=identity, cols=None):
self.treatment_aggregator = treatment_aggreagtor
self.covariate_aggregator = covariate_aggregator
super().__init__(cols)
def _balance_func(self, df, assignments):
pvalues = dict((col, self.pvalues_by_col(
col, df, assignments)) for col in self.col_selection(df))
return self.covariate_aggregator(pd.DataFrame(pvalues))
def pvalues_by_col(self, col, df, assignments):
pv = self.treatment_aggregator(self.ols_col_on_treatment(
col, df, assignments).pvalues.iloc[1:].values)
if isinstance(pv, Number):
pv = [pv]
return pv
def ols_col_on_treatment(self, col, df, assignments):
t_dummies = pd.DataFrame(
dict(('t{}'.format(i), df.index.isin(assignment))
for i, assignment in enumerate(assignments)))
data = pd.concat((df, t_dummies), axis=1)
sel_dummies = self._get_non_collinear_dummies(t_dummies)
formula = '{} ~ 1 + {}'.format(col, ' + '.join(sel_dummies))
return ols(formula, data=data).fit()
def _get_non_collinear_dummies(self, t_dummies):
is_collinear = int(t_dummies.sum(axis=1).mean()) == 1
if is_collinear:
return t_dummies.columns[:-1]
else:
return t_dummies.columns
def pvalues_report(df, assignments):
if isinstance(assignments, pd.DataFrame):
assignments = get_assignments_as_positions(assignments)
report = PValueBalance().balance_func(df, assignments)
idx = ['t{}'.format(i + 1) for i in range(len(report))]
report.index = idx
return report
def pvalue_balance(cols=None):
return PValueBalance(
treatment_aggreagtor=np.min,
covariate_aggregator=min_across_covariates,
cols=cols
).balance_func
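# Editor's sketch with made-up data (not part of the original module):
# ``pvalues_report`` regresses each covariate on the treatment dummies and
# tabulates the resulting p-values, so small values flag covariates that are
# poorly balanced across treatment groups.
def _pvalues_report_example():
    rng = np.random.default_rng(1)
    df = pd.DataFrame(rng.normal(size=(30, 2)), columns=['age', 'income'])
    assignments = [list(range(0, 15)), list(range(15, 30))]
    return pvalues_report(df, assignments)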
class BlockBalance(BalanceObjective):
def __init__(self, treatment_aggreagtor=identity,
covariate_aggregator=identity,
category_aggregator=max_absolute_value, cols=None):
self.treatment_aggregator = treatment_aggreagtor
self.covariate_aggregator = covariate_aggregator
self.category_aggregator = category_aggregator
super().__init__(cols)
def _balance_func(self, df, assignments):
relative_count_all = dict((col, self.relative_count_by_col(
col, df, assignments)) for col in self.col_selection(df))
return -self.covariate_aggregator( | pd.DataFrame(relative_count_all) | pandas.DataFrame |
#Resume Filter code
#importing all required libraries
import PyPDF2
import os
from os import listdir
from os.path import isfile, join
from io import StringIO
import pandas as pd
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()
from spacy.matcher import PhraseMatcher
#Function to read resumes from the folder one by one
mypath="/home/usr/Resume/app" #enter your path here where you saved the resumes in Linux
# mypath="c:/home/usr/Resume/app" #use for windows, repeat for line
onlyfiles = [os.path.join(mypath, f) for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath, f))]
def pdfextract(file):
fileReader = PyPDF2.PdfFileReader(open(file,'rb'))
countpage = fileReader.getNumPages()
count = 0
text = []
while count < countpage:
pageObj = fileReader.getPage(count)
count +=1
t = pageObj.extractText()
print (t)
text.append(t)
return text
#function to read resume ends
#function that does phrase matching and builds a candidate profile
def create_profile(file):
text = pdfextract(file)
text = str(text)
text = text.replace("\\n", "")
text = text.lower()
#below is the csv where we have all the keywords, you can customize your own
keyword_dict = | pd.read_csv('/home/usr/Resume/template/template.csv', encoding='ISO-8859-1') | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_FirstDemoIntro.ipynb (unless otherwise specified).
__all__ = ['objToCat', 'sampleDF']
# Cell
import pandas as pd
# Cell
def objToCat(df):
'''
    This function converts object (string) columns to categories and replaces them with int8 codes
    Note: *This function expects a pandas DataFrame as input*
``` python
### Example
>>> dataframe.info()
>>> i Name_Column Len Dtype: object
...
>>> objToCat(dataframe)
>>> dataframe.info()
>>> i Name_Column Len Dtype: int8
```
Sign: mkarimi21
'''
len_of_type = len(df.dtypes)
list_of_type = list(df.dtypes)
object_type = []
for i in range(len_of_type):
if list_of_type[i] == 'object':
object_type.append(df.columns[i])
for i in range(len(object_type)):
df[object_type[i]] = df[object_type[i]].astype('category').cat.codes
return df
# Cell
import sweetviz as sv
# Cell
def sampleDF(df, column):
'''
    Clean data and draw equally sized samples per label for classification
``` python
# Example
>>> list(DataFrame['Column'].value_counts())
>>> [Alpha, Beta, ...]
...
>>> a = sampleDF(DataFrame, 'Column')
>>> list(a.value_counts())
>>> [N, N, N, ...]
```
Sign: mkarimi21
'''
    # import library
from random import randint
# define column
a = str(column)
    # threshold: 5% of the rows
    one_percentage = (len(df[a]) // 100) * 5
    # sample only from classes whose counts exceed the 5% threshold
df_sample = []
len_of_count = list(range(len(df[a].value_counts())))
list_count = list(df[a].value_counts())
# add min count of data to sampling and create sample dataframe
min_df = []
for i in range(len(list_count)):
if list_count[i] >= one_percentage:
min_df.append(list_count[i])
min_df_count = min(min_df)
rndom_state = randint(1, 100)
for i in range(len(len_of_count)):
if len(df[df[a] == i]) > one_percentage:
df_sample.append(df[df[a] == i].sample(n=min_df_count, random_state=rndom_state))
split_data = | pd.concat(df_sample, axis=0) | pandas.concat |
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
from app.chat.emoji import get_score_from_emoji
import pandas as pd
import json
BATCH = 20
LABELS_SCORES = {
"NEU": 0.5,
"POS": 0.99,
"NEG": 0.01
}
class Model:
def __init__(self):
self.tokenizer = AutoTokenizer.from_pretrained("finiteautomata/beto-sentiment-analysis")
self.model = AutoModelForSequenceClassification.from_pretrained("finiteautomata/beto-sentiment-analysis")
self.nlp = pipeline('sentiment-analysis', model=self.model, tokenizer=self.tokenizer)
def predict(self, sentence):
return self.nlp(sentence)
def generate_df_with_scores(self, telegram_chat):
df = | pd.DataFrame(telegram_chat["messages"]) | pandas.DataFrame |
import pdal
import json
import sys
import pandas as pd
from shapely.geometry import Point
import geopandas as gpd
def get_region_bound(Region: str, bound: str):
    '''
    This function prompts the user to enter the desired region and bound.
    The expected output is: the Region, the bound, the AWS public path, the laz output file path, the tif file path and the csv file path
'''
PUBLIC_DATA_PATH = "https://s3-us-west-2.amazonaws.com/usgs-lidar-public/"
pipeline_path = '../scripts/get_data.json'
print ('===================REQUESTING REGION =====================')
Region = (input("Enter the Region : ")) #IA_FullState
print (Region)
print ('===================REQUESTING BOUNDS =====================')
bound = (input("Enter the Bounds [MINX, MINY, MAXX, MAXY] : ")) #bound = "([-11669524.7, -11666600.8], [4776607.3, 4778714.4])"
print(bound)
print ('===================REQUEST PATH =====================')
access_path = PUBLIC_DATA_PATH + Region + '/ept.json'
print(access_path)
print ('===================OUPUT LAZ PATH =====================')
output_filename_laz = "../laz/"+Region+".laz"
print(output_filename_laz)
print ('===================OUPUT TIF PATH =====================')
output_filename_tif = "../tif/"+Region+".tif"
print(output_filename_tif)
print ('===================OUPUT CSV PATH =====================')
output_filename_csv = "../csv/"+Region+".csv"
print(output_filename_csv)
return Region, bound, access_path, output_filename_laz, output_filename_tif,output_filename_csv,pipeline_path
#Region = 'USGS_LPC_CO_SoPlatteRiver_Lot5_2013_LAS_2015/'
#bound = "([-11669524.7, -11666600.8], [4776607.3, 4778714.4])"
#([-93.756155, 41.918015],[ -93.747334, 41.921429])
#access_path = PUBLIC_DATA_PATH + Region + 'ept.json'
#output_filename_laz = '../laz/USGS_LPC_CO_SoPlatteRiver_Lot5_2013_LAS_2015.laz'
#ouput_filename_tif = '../tif/USGS_LPC_CO_SoPlatteRiver_Lot5_2013_LAS_2015.tif'
#pipeline_path = '../scripts/get_data.json'
def get_raster_terrain(region, bounds, access_path, output_filename_laz, ouput_filename_tif, output_filename_csv, pipeline_path):  # output_filename_geojson
    """
    Queries the pdal json pipeline, fills in the bounds and filename paths accordingly, then generates the data
"""
with open(pipeline_path) as json_file:
the_json = json.load(json_file)
print ('===================filling pdal json file =====================')
the_json['pipeline'][0]['bounds']=bounds
the_json['pipeline'][0]['filename']=access_path
the_json['pipeline'][6]['filename']=output_filename_laz
the_json['pipeline'][7]['filename']=ouput_filename_tif
the_json['pipeline'][8]['filename']=output_filename_csv
print ('===================dumping the json file, saving .tif,.laz.csv =====================')
pipeline = pdal.Pipeline(json.dumps(the_json))
try:
        count = pipeline.execute()
        metadata = pipeline.metadata
    except RuntimeError as e:
        print(e)
def geometry (csvpath,region):
"""
    Takes the csv output file and generates a geodataframe with the elevation and coordinates of every point
"""
print ('===================READING CSV =====================')
pan = pd.read_csv(csvpath)
geo = pan[["X","Y","Z"]]
#convert xyz to geodataframe
geometry = [Point(xy) for xy in zip(pd.to_numeric(geo['X']), | pd.to_numeric(geo['Y']) | pandas.to_numeric |
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
key = ("<KEY>")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
# Filling with a value outside the categories raises a TypeError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
# GH 32624: Error when using a lot of indices to unstack.
# The error occurred only if a lot of indices were used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
# GH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
# GH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
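# Note on the helpers above (descriptive, based on the code as written):
# cast() renders NaN as a single space (width-1 formatting of an empty
# string), matching entries such as " .y" in the dot-joined label strings;
# verify() checks that each non-null cell's dot-separated pieces equal the
# cell's row and column labels after applying cast().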
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
cols = right.columns[[1, 2, 3, 5]]
right[cols] = right[cols].astype(df["C"].dtype)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) MultiIndex bug
def test_unstack_nan_index5(self):
# GH9497 - multiple unstack with nulls
df = DataFrame(
{
"1st": [1, 2, 1, 2, 1, 2],
"2nd": date_range("2014-02-01", periods=6, freq="D"),
"jim": 100 + np.arange(6),
"joe": (np.random.randn(6) * 10).round(2),
}
)
df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
assert left.notna().values.sum() == 2 * len(df)
for col in ["jim", "joe"]:
for _, r in df.iterrows():
key = r["1st"], (col, r["2nd"], r["3rd"])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
ecols = MultiIndex.from_tuples([(t, "A")])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
tm.assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(
np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex,
)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(
df.columns.to_numpy(), names=df.columns.names
)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples(
[("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
names=["Upper", "Lower"],
)
for multiindex_columns in (
[0, 1, 2, 3, 4],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 1],
[0, 2],
[0, 3],
[0],
[2],
[4],
):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame(
[[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ["u", "x", "y", "z"]],
codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, "Lower"],
),
columns=Index(["B", "C"], name="Upper"),
dtype=df.dtypes[0],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("labels", [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"), ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"labels,data",
[
(list("xyz"), [10, 11, 12, 13, 14, 15]),
(list("zyx"), [14, 15, 12, 13, 10, 11]),
],
)
def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
# GH-36991
cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
midx = MultiIndex.from_product([cidx, cidx2])
df = DataFrame([sorted(data)], columns=midx)
result = df.stack([0, 1])
s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered)
expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2]))
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(["a", "a", "b", "c"])
df = DataFrame({"A": cat, "B": cat})
result = df.stack()
index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
expected = Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, columns",
[
([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
],
)
def test_stack_multi_columns_non_unique_index(self, index, columns):
# GH-28301
df = DataFrame(index=index, columns=columns).fillna(1)
stacked = df.stack()
new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
expected = DataFrame(
stacked.to_numpy(), index=new_index, columns=stacked.columns
)
tm.assert_frame_equal(stacked, expected)
stacked_codes = np.asarray(stacked.index.codes)
expected_codes = np.asarray(new_index.codes)
tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
df = DataFrame(
{
"A": pd.array([0, 1, None], dtype="Int64"),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = DataFrame(
[[3, 1, 2, 0]],
columns=MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = Series(["a", "b", "c", "a"], dtype="object")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value="d")
expected = DataFrame(
{"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = DataFrame(
{
"timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
"a": ["a"],
"b": ["b"],
"c": ["c"],
},
columns=["timestamp", "a", "b", "c"],
)
result = df.set_index(["a", "b"]).unstack()
expected = DataFrame(
[[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
index=Index(["a"], name="a"),
columns=MultiIndex(
levels=[["timestamp", "c"], ["b"]],
codes=[[0, 1], [0, 0]],
names=[None, "b"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_timezone_aware_values():
# GH 19420
ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York")
df = DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
expected = Series(
ts,
index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_stack_empty_frame(dropna):
# GH 36113
expected = Series(index=MultiIndex([[], []], [[], []]), dtype=np.float64)
result = DataFrame(dtype=np.float64).stack(dropna=dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_stack_unstack_empty_frame(dropna, fill_value):
# GH 36113
result = (
DataFrame(dtype=np.int64).stack(dropna=dropna).unstack(fill_value=fill_value)
)
expected = DataFrame(dtype=np.int64)
tm.assert_frame_equal(result, expected)
def test_unstack_single_index_series():
# GH 36113
msg = r"index must be a MultiIndex to unstack.*"
with pytest.raises(ValueError, match=msg):
Series(dtype=np.int64).unstack()
def test_unstacking_multi_index_df():
# see gh-30740
df = DataFrame(
{
"name": ["Alice", "Bob"],
"score": [9.5, 8],
"employed": [False, True],
"kids": [0, 0],
"gender": ["female", "male"],
}
)
df = df.set_index(["name", "employed", "kids", "gender"])
df = df.unstack(["gender"], fill_value=0)
expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
result = df.unstack(["employed", "kids"], fill_value=0)
expected = DataFrame(
[[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
index=Index(["Alice", "Bob"], name="name"),
columns=MultiIndex.from_tuples(
[
("score", "female", False, 0),
("score", "female", True, 0),
("score", "male", False, 0),
("score", "male", True, 0),
],
names=[None, "gender", "employed", "kids"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_positional_level_duplicate_column_names():
# https://github.com/pandas-dev/pandas/issues/36353
columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"])
df = DataFrame([[1, 1, 1, 1]], columns=columns)
result = df.stack(0)
new_columns = Index(["y", "z"], name="a")
new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"])
expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns)
tm.assert_frame_equal(result, expected)
class TestStackUnstackMultiLevel:
def test_unstack(self, multiindex_year_month_day_dataframe_random_data):
# just check that it works for now
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack()
unstacked.unstack()
# test that ints work
ymd.astype(int).unstack()
# test that int32 work
ymd.astype(np.int32).unstack()
@pytest.mark.parametrize(
"result_rows,result_columns,index_product,expected_row",
[
(
[[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]],
["ix1", "ix2", "col1", "col2", "col3", "col4"],
2,
[None, None, 30.0, None],
),
(
[[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
2,
[None, None, 30.0],
),
(
[[1, 1, None, None, 30.0], [2, None, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
None,
[None, None, 30.0],
),
],
)
def test_unstack_partial(
self, result_rows, result_columns, index_product, expected_row
):
# check for regressions on this issue:
# https://github.com/pandas-dev/pandas/issues/19351
# make sure DataFrame.unstack() works when it is run on a subset of the DataFrame
# and the Index levels contain values that are not present in the subset
result = DataFrame(result_rows, columns=result_columns).set_index(
["ix1", "ix2"]
)
result = result.iloc[1:2].unstack("ix2")
expected = DataFrame(
[expected_row],
columns=MultiIndex.from_product(
[result_columns[2:], [index_product]], names=[None, "ix2"]
),
index=Index([2], name="ix1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples(
[(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)]
)
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
def test_stack(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
# regular roundtrip
unstacked = ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
unlexsorted = ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
# columns unsorted
unstacked = ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
# more than 2 levels in the columns
unstacked = ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
result = ymd.unstack(0).stack(-2)
expected = ymd.unstack(0).stack(0)
tm.assert_frame_equal(result, expected)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert left.index.is_unique is False
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(
np.arange(12).reshape(4, 3),
index=list("abab"),
columns=["1st", "2nd", "3rd"],
)
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd", "3rd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile(np.arange(3), 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ["1st", "2nd", "1st"]
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ("a", 2), ("b", 1), ("a", 1), ("b", 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(
levels=[["a", "b"], [1, 2], ["1st", "2nd"]],
codes=[
np.tile(np.arange(2).repeat(3), 2),
np.repeat([1, 0, 1], [3, 6, 3]),
np.tile([0, 1, 0], 4),
],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thu,Dinner,No,3.0,1
Thu,Lunch,No,117.32,44
Thu,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df["foo", "four"] = "foo"
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df["foo"].stack().sort_index()
tm.assert_series_equal(stacked["foo"], result, check_names=False)
assert result.name is None
assert stacked["bar"].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame(
{
"state": ["naive", "naive", "naive", "active", "active", "active"],
"exp": ["a", "b", "b", "b", "a", "a"],
"barcode": [1, 2, 3, 4, 1, 3],
"v": ["hi", "hi", "bye", "bye", "bye", "peace"],
"extra": np.arange(6.0),
}
)
result = df.groupby(["state", "exp", "barcode", "v"]).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack()
assert unstacked.index.name == "first"
assert unstacked.columns.names == ["exp", "second"]
restacked = unstacked.stack()
assert restacked.index.names == frame.index.names
@pytest.mark.parametrize("method", ["stack", "unstack"])
def test_stack_unstack_wrong_level_name(
self, method, multiindex_dataframe_random_data
):
# GH 18303 - wrong level name should raise
frame = multiindex_dataframe_random_data
# A DataFrame with flat axes:
df = frame.loc["foo"]
with pytest.raises(KeyError, match="does not match index name"):
getattr(df, method)("mistake")
if method == "unstack":
# Same on a Series:
s = df.iloc[:, 0]
with pytest.raises(KeyError, match="does not match index name"):
getattr(s, method)("mistake")
def test_unstack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.unstack("second")
expected = frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack("second")
result = unstacked.stack("exp")
expected = frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = frame.stack("exp")
expected = frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
expected = ymd.unstack("year").unstack("month")
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = ymd["A"]
s_unstacked = s.unstack(["year", "month"])
tm.assert_frame_equal(s_unstacked, expected["A"])
restacked = unstacked.stack(["year", "month"])
restacked = restacked.swaplevel(0, 1).swaplevel(1, 2)
restacked = restacked.sort_index(level=0)
tm.assert_frame_equal(restacked, ymd)
assert restacked.index.names == ymd.index.names
# GH #451
unstacked = ymd.unstack([1, 2])
expected = ymd.unstack(1).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
unstacked = ymd.unstack([2, 1])
expected = ymd.unstack(2).unstack(1).dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected.loc[:, unstacked.columns])
def test_stack_names_and_numbers(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
# Can't use mixture of names and numbers to stack
with pytest.raises(ValueError, match="level should contain"):
unstacked.stack([0, "month"])
def test_stack_multiple_out_of_bounds(
self, multiindex_year_month_day_dataframe_random_data
):
# nlevels == 3
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
with pytest.raises(IndexError, match="Too many levels"):
unstacked.stack([2, 3])
with pytest.raises(IndexError, match="not a valid level number"):
unstacked.stack([-4, -3])
def test_unstack_period_series(self):
# GH4342
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period",
)
idx2 = Index(["A", "B"] * 3, name="str")
value = [1, 2, 3, 4, 5, 6]
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period"
)
expected = DataFrame(
{"A": [1, 3, 5], "B": [2, 4, 6]}, index=e_idx, columns=["A", "B"]
)
expected.columns.name = "str"
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
idx1 = pd.PeriodIndex(
["2013-01", "2013-01", "2013-02", "2013-02", "2013-03", "2013-03"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-11", "2013-10", "2013-09", "2013-08", "2013-07"],
freq="M",
name="period2",
)
idx = MultiIndex.from_arrays([idx1, idx2])
s = Series(value, index=idx)
result1 = s.unstack()
result2 = s.unstack(level=1)
result3 = s.unstack(level=0)
e_idx = pd.PeriodIndex(
["2013-01", "2013-02", "2013-03"], freq="M", name="period1"
)
e_cols = pd.PeriodIndex(
["2013-07", "2013-08", "2013-09", "2013-10", "2013-11", "2013-12"],
freq="M",
name="period2",
)
expected = DataFrame(
[
[np.nan, np.nan, np.nan, np.nan, 2, 1],
[np.nan, np.nan, 4, 3, np.nan, np.nan],
[6, 5, np.nan, np.nan, np.nan, np.nan],
],
index=e_idx,
columns=e_cols,
)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
tm.assert_frame_equal(result3, expected.T)
def test_unstack_period_frame(self):
# GH4342
idx1 = pd.PeriodIndex(
["2014-01", "2014-02", "2014-02", "2014-02", "2014-01", "2014-01"],
freq="M",
name="period1",
)
idx2 = pd.PeriodIndex(
["2013-12", "2013-12", "2014-02", "2013-10", "2013-10", "2014-02"],
freq="M",
name="period2",
)
value = {"A": [1, 2, 3, 4, 5, 6], "B": [6, 5, 4, 3, 2, 1]}
idx = MultiIndex.from_arrays([idx1, idx2])
df = DataFrame(value, index=idx)
result1 = df.unstack()
result2 = df.unstack(level=1)
result3 = df.unstack(level=0)
e_1 = pd.PeriodIndex(["2014-01", "2014-02"], freq="M", name="period1")
import argparse
import logging
import os
import warnings
from collections import Counter
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
import pandas as pd
import numpy as np
from pyteomics import mass
from . import AA_stat, locTools
def main():
pars = argparse.ArgumentParser()
pars.add_argument('--params', help='CFG file with parameters. '
'An example can be found at https://github.com/SimpleNumber/aa_stat',
required=True)
pars.add_argument('--dir', help='Directory to store the results. '
'Default value is current directory.', default='.')
pars.add_argument('-v', '--verbosity', action='count', default=1, help='Increase output verbosity')
input_spectra = pars.add_mutually_exclusive_group()
input_spectra.add_argument('--mgf', nargs='+', help='MGF files to localize modifications')
input_spectra.add_argument('--mzML', nargs='+', help='mzML files to localize modifications')
input_file = pars.add_mutually_exclusive_group(required=True)
input_file.add_argument('--pepxml', nargs='+', help='List of input files in pepXML format')
input_file.add_argument('--csv', nargs='+', help='List of input files in CSV format')
levels = [logging.WARNING, logging.INFO, logging.DEBUG]
args = pars.parse_args()
save_directory = args.dir
level = 2 if args.verbosity >= 2 else args.verbosity
logging.basicConfig(format='%(levelname)5s: %(asctime)s %(message)s',
datefmt='[%H:%M:%S]', level=levels[level])
logger = logging.getLogger(__name__)
logger.info("Starting...")
params = ConfigParser(delimiters=('=', ':'),
comment_prefixes=('#'),
inline_comment_prefixes=('#'))
params.read(args.params)
params_dict = AA_stat.get_parameters(params)
params_dict = AA_stat.get_additional_params(params_dict) #params_dict 'window'
data = AA_stat.read_input(args, params_dict)
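# High-level flow (inferred from the AA_stat function names used below, not
# from AA_stat internals): fit peaks in the mass-shift histogram, filter and
# group the shifts, normalise them by the systematic (zero) mass shift, then
# compute per-shift amino-acid statistics, tables and reports.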
hist, popt_pvar = AA_stat.fit_peaks(data, args, params_dict)
# print(popt_pvar)
# print('=======================================')
final_mass_shifts = AA_stat.filter_mass_shifts(popt_pvar)
# print(final_mass_shifts)
mass_shift_data_dict = AA_stat.group_specific_filtering(data, final_mass_shifts, params_dict)
# print('======================',mass_shift_data_dict )
zero_mass_shift = AA_stat.get_zero_mass_shift(list(mass_shift_data_dict.keys()))
logger.info("Systematic mass shift equals to %s", AA_stat.mass_format(zero_mass_shift) )
mass_shift_data_dict = AA_stat.systematic_mass_shift_correction(mass_shift_data_dict, zero_mass_shift)
if len(mass_shift_data_dict) < 2:
logger.info('Mass shifts were not found.')
logger.info('Filtered mass shifts:')
for i in mass_shift_data_dict.keys():
# print(mass_shift_data_dict.keys())
logger.info(AA_stat.MASS_FORMAT.format(i))
else:
distributions, number_of_PSMs, ms_labels = AA_stat.calculate_statistics(mass_shift_data_dict, 0, params_dict, args)
# print(mass_shift_data_dict)
table = AA_stat.save_table(distributions, number_of_PSMs, ms_labels)
# print(table['mass shift'])
table.to_csv(os.path.join(save_directory, 'aa_statistics_table.csv'), index=False)
# print('=======================', table)
AA_stat.summarizing_hist(table, save_directory)
logger.info('Summarizing hist prepared')
AA_stat.render_html_report(table, params_dict, save_directory)
logger.info('AA_stat results saved to %s', os.path.abspath(args.dir))
table.index = table['mass shift'].apply(AA_stat.mass_format)
spectra_dict = AA_stat.read_spectra(args)
if spectra_dict.keys():
if args.mgf:
params_dict['mzml_files'] = False
else:
params_dict['mzml_files'] = True
logger.info('Starting Localization using MS/MS spectra...')
# print(params_dict['mzml_files'])
ms_labels = pd.Series(ms_labels)
locmod_df = pd.DataFrame({'mass shift': ms_labels})
locmod_df['# peptides in bin'] = table['# peptides in bin']
locmod_df[['is isotope', 'isotop_ind']] = locTools.find_isotopes(locmod_df['mass shift'], tolerance=AA_stat.ISOTOPE_TOLERANCE)
locmod_df['sum of mass shifts'] = locTools.find_modifications(locmod_df.loc[~locmod_df['is isotope'], 'mass shift'])
locmod_df['sum of mass shifts'].fillna(False, inplace=True)
locmod_df['aa_stat candidates'] = locTools.get_candidates_from_aastat(table,
labels=params_dict['labels'], threshold=AA_stat.AA_STAT_CAND_THRESH)
u = mass.Unimod().mods
unimod_db = np.array(u)
unimod_df = pd.DataFrame(u)
import pandas as pd
import numpy as np
import dash
dash.__version__
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output,State
import plotly.graph_objects as go
import os
print(os.getcwd())
df_ip_big = pd.read_csv('data/processed/COVID_final_set.csv', sep=';')
#!/usr/local/bin/python
# Imports inferred from the names used in this file (re, sys, numpy, pandas,
# jsonpickle, pygments); the original module may have imported them differently.
import re
import sys

import numpy as np
import pandas as pd

import jsonpickle
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from pygments.lexers import JsonLexer
def pprint_color(obj, flat=False):
jsonpickle.set_preferred_backend('json')
jsonpickle.set_encoder_options('json', sort_keys=True, indent=4)
if flat is True:
parsed = jsonpickle.encode(obj, unpicklable=False)
else:
parsed = jsonpickle.encode(obj, make_refs=True)
print(
highlight(
parsed,
JsonLexer(),
Terminal256Formatter(style='rainbow_dash')
)
)
def get_dict_from_table(table, clones_dict, check_dict):
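# Parses a tab-separated table whose header row starts with SEQUENCE_ID and
# returns {sequence_id: {column_name: value}}; when check_dict is truthy,
# only rows whose id appears in clones_dict are kept.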
table_file = open(table, "r")
table_file_dict = dict()
header = []
for row in table_file:
if re.match('^SEQUENCE_ID', row, re.IGNORECASE):
header = row.rstrip().split("\t")
continue
if not header:
print(header)
print("No header in the file")
sys.exit()
row_list = row.rstrip().split("\t")
row_dict = dict(zip(header, row_list))
if check_dict:
if row_list[0] in clones_dict:
table_file_dict[row_list[0]] = row_dict
else:
table_file_dict[row_list[0]] = row_dict
return(table_file_dict)
def get_sequences(igblast_airr_dict, v_germline_sequences, organism, hv_primer, kv_primer, lv_primer, corrected_regions_file_dict):
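# Rebuilds each query sequence and patches its primer-covered 5' end with
# germline V bases: the number of nucleotides taken from the germline
# (hv_primer / kv_primer / lv_primer) is selected by the chain found in
# v_call (IGH / IGK / IGL). For every sequence four variants are returned:
# full_input, corrected_input, full_input_from_start and
# corrected_input_from_start.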
header = [
'full_input',
'corrected_input',
'full_input_from_start',
'corrected_input_from_start'
]
sequences_dict = dict()
aux_dict = dict()
for key in igblast_airr_dict.keys():
full_input_from_start = ""
corrected_input = ""
corrected_input_from_start = ""
full_input = igblast_airr_dict[key]['sequence']
vdj_sequences = corrected_regions_file_dict[key]['SEQUENCE_VDJ']
vdj_sequences = re.sub("-", "", vdj_sequences)
full_input = re.match(r'(^\S*' + vdj_sequences + ')', full_input).group(1)
fwr1_start = int(igblast_airr_dict[key]['v_sequence_start']) - 1
v_germline_start = int(igblast_airr_dict[key]['v_germline_start']) - 1
v_germline_id = igblast_airr_dict[key]['v_call'].split(",")[0]
if re.search(r"IGH", v_germline_id):
correction_length = int(hv_primer)
elif re.search(r"IGK", v_germline_id):
correction_length = int(kv_primer)
elif re.search(r"IGL", v_germline_id):
correction_length = int(lv_primer)
v_germ_sequence = v_germline_sequences[v_germline_id].seq
if fwr1_start <= v_germline_start:
if v_germline_start > correction_length:
from_start_nth_nt_germ_seq = v_germ_sequence[:v_germline_start]
corrected_input_from_start = from_start_nth_nt_germ_seq + full_input[fwr1_start:]
corrected_input = full_input
full_input_from_start = corrected_input_from_start
else:
from_start_nth_nt_germ_seq = v_germ_sequence[:correction_length]
full_input_end = (correction_length - v_germline_start) + fwr1_start
relative_germline_start = correction_length - full_input_end
germline_overlap_seq = from_start_nth_nt_germ_seq[relative_germline_start:]
corrected_input = germline_overlap_seq + full_input[full_input_end :]
corrected_input_from_start = from_start_nth_nt_germ_seq + full_input[full_input_end:]
full_input_from_start = from_start_nth_nt_germ_seq[:relative_germline_start] + full_input
elif fwr1_start > v_germline_start:
if v_germline_start > correction_length:
from_start_nth_nt_germ_seq = v_germ_sequence[:v_germline_start]
corrected_input_from_start = from_start_nth_nt_germ_seq + full_input[fwr1_start : ]
corrected_input = full_input[:fwr1_start - v_germline_start] + from_start_nth_nt_germ_seq[:v_germline_start] + full_input[fwr1_start: ]
full_input_from_start = corrected_input
else:
from_start_nth_nt_germ_seq = v_germ_sequence[:correction_length]
full_input_end = (correction_length - v_germline_start) + fwr1_start
corrected_input_from_start = from_start_nth_nt_germ_seq + full_input[full_input_end :]
corrected_input = full_input[: fwr1_start - v_germline_start ] + corrected_input_from_start
full_input_from_start = full_input[: fwr1_start - v_germline_start ] + from_start_nth_nt_germ_seq[:v_germline_start] + full_input[fwr1_start:]
sequences_list = [str(full_input), str(corrected_input), str(full_input_from_start), str(corrected_input_from_start)]
aux_dict = dict(zip(header, sequences_list))
sequences_dict[key] = aux_dict
return(sequences_dict)
def check_dict_keys(igblast_dict):
keys_to_check = ['CDR3-IMGT (germline)_from', 'CDR3-IMGT (germline)_to', 'CDR3-IMGT (germline)_length', 'CDR3-IMGT (germline)_matches', 'CDR3-IMGT (germline)_mismatches', 'CDR3-IMGT (germline)_gaps',
'FR1-IMGT_from', 'FR1-IMGT_to', 'FR1-IMGT_length', 'FR1-IMGT_matches', 'FR1-IMGT_mismatches', 'FR1-IMGT_gaps',
'CDR1-IMGT_from', 'CDR1-IMGT_to', 'CDR1-IMGT_length', 'CDR1-IMGT_matches', 'CDR1-IMGT_mismatches', 'CDR1-IMGT_gaps',
'FR2-IMGT_from', 'FR2-IMGT_to', 'FR2-IMGT_length', 'FR2-IMGT_matches', 'FR2-IMGT_mismatches', 'FR2-IMGT_gaps',
'CDR2-IMGT_from', 'CDR2-IMGT_to', 'CDR2-IMGT_length', 'CDR2-IMGT_matches', 'CDR2-IMGT_mismatches', 'CDR2-IMGT_gaps',
'FR3-IMGT_from', 'FR3-IMGT_to', 'FR3-IMGT_length', 'FR3-IMGT_matches', 'FR3-IMGT_mismatches', 'FR3-IMGT_gaps']
for seq in igblast_dict:
for key in keys_to_check:
if key not in igblast_dict[seq]:
igblast_dict[seq][key] = np.nan
return(igblast_dict)
def get_dict_from_igblast_fmt7(clones_dict, igblast_fmt7):
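# Parses the "Alignment summary" block of an IgBLAST outfmt7 report into
# {query_id: {"<region>_<metric>": value}} entries such as
# "FR1-IMGT_mismatches", keeping only queries present in clones_dict and
# padding absent regions with NaN via check_dict_keys().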
igblast_file = open(igblast_fmt7, "r")
igblast_file_dict = dict()
information_dict = dict()
key = None
header = []
header_list = []
information_all_regions = []
information_flag = False
for row in igblast_file:
if re.match(".*Query: ", row):
key = row.split(" ")[2].rstrip()
continue
if re.match(".*Alignment summary", row):
header = re.search(r'\(.*\)', row).group(0)
header = header.split(",")
header = [element.strip() for element in header]
header[0] = header[0].replace("(", "")
header[-1] = header[-1].replace(")", "")
header_aux = header
information_flag = True
continue
if (re.match("^(?!Total)", row)) and (information_flag):
information_list = row.rstrip().split("\t")
region = information_list[0]
header = [region + "_" + element for element in header]
header_list.append(header)
information_all_regions.append(information_list[1:])
header = header_aux
continue
elif re.match("^Total\t", row):
information_flag = False
flat_header_list = [
item for sublist in header_list for item in sublist
]
flat_information_list = [
item for sublist in information_all_regions for item in sublist
]
information_dict = dict(
zip(flat_header_list, flat_information_list)
)
header_list = []
information_all_regions = []
if key is not None and key in clones_dict:
igblast_file_dict[key] = information_dict
igblast_file_dict_corrected = check_dict_keys(igblast_file_dict)
print("Correction:")
print(igblast_file_dict_corrected)
return(igblast_file_dict_corrected)
def hamming_distance(chaine1, chaine2):
return sum(c1 != c2 for c1, c2 in zip(chaine1, chaine2))
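# Example (illustrative): hamming_distance("GATTACA", "GACTATA") == 2 (the
# strings differ at positions 2 and 5); unequal-length inputs are compared
# only over their overlap because zip() stops at the shorter sequence.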
def aminoacids_mismatches(aminoacids_sequences_table):
mismatches_list = []
for i in range(0, aminoacids_sequences_table.shape[0]):
v_germ_seq = str(
aminoacids_sequences_table.iloc[i]['v_germline_alignment_aa']
)
v_seq_aa = str(
aminoacids_sequences_table.iloc[i]['v_sequence_alignment_aa'])
if len(v_germ_seq) > len(v_seq_aa):
v_germ_seq_subset = v_germ_seq[:len(v_seq_aa)]
mismatches_list.append(
hamming_distance(
v_germ_seq_subset,
v_seq_aa))
elif len(v_germ_seq) < len(v_seq_aa):
v_seq_aa_subset = v_seq_aa[:len(v_germ_seq)]
mismatches_list.append(
hamming_distance(
v_germ_seq,
v_seq_aa_subset))
elif len(v_germ_seq) == len(v_seq_aa):
mismatches_list.append(hamming_distance(v_germ_seq, v_seq_aa))
return(mismatches_list)
def select_information(define_clones_dict, igblast_airr_dict, igblast_fmt7_dict, corrected_sequences_dict, correction):
define_clones_pd = pd.DataFrame(define_clones_dict).T
igblast_airr_pd = pd.DataFrame(igblast_airr_dict)
from __future__ import annotations
import math
import re
from decimal import Decimal, ROUND_HALF_UP
from dateutil.parser._parser import ParserError
from typing import Dict, Hashable, Union
import json
import numpy
import pandas
from pandas import Series
from .utils import to_utf8_bytes
from .errors import InvalidRedshiftType
Dtype = Union[str, "RedshiftType"]
DtypeArg = Union[Dtype, Dict[Hashable, Dtype]]
_TYPE_REGEX = re.compile(r"^([a-zA-Z0-9 ]*)(\(([0-9, ]*?)\))?$")
def get_redshift_type(type_str):
m = _TYPE_REGEX.match(type_str)
if not m:
raise InvalidRedshiftType(
"Redshift type not found for '{}'".format(type_str)
)
type_name = m.group(1)
type_args = m.group(3)
type_name = type_name.upper().strip()
type_dict = {
"SMALLINT": SmallInt,
"INT2": SmallInt,
"INTEGER": Integer,
"INT": Integer,
"INT4": Integer,
"BIGINT": BigInt,
"INT8": BigInt,
"DECIMAL": Numeric,
"NUMERIC": Numeric,
"REAL": Real,
"FLOAT4": Real,
"DOUBLE PRECISION": DoublePrecision,
"FLOAT8": DoublePrecision,
"FLOAT": DoublePrecision,
"BOOLEAN": Boolean,
"BOOL": Boolean,
"CHAR": Char,
"CHARACTER": Char,
"NCHAR": Char,
"BPCHAR": BPChar,
"VARCHAR": VarChar,
"CHARACTER VARYING": VarChar,
"NVARCHAR": VarChar,
"TEXT": Text,
"DATE": Date,
"TIMESTAMP": TimeStamp,
"TIMESTAMP WITHOUT TIME ZONE": TimeStamp,
"TIMESTAMPTZ": TimeStampTz,
"TIMESTAMP WITH TIME ZONE": TimeStampTz,
"TIME": Time,
"TIME WITHOUT TIME ZONE": Time,
"TIMETZ": TimeTz,
"TIME WITH TIME ZONE": TimeTz,
"GEOMETRY": Geometry,
"SUPER": Super,
}
if type_name not in type_dict:
raise InvalidRedshiftType(
"Redshift type not found for '{}'".format(type_str)
)
redshift_type = type_dict[type_name]
if type_args:
type_args = [int(elm.strip()) for elm in type_args.split(",")]
else:
type_args = []
return redshift_type(*type_args)
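# Illustrative resolution of type strings by get_redshift_type():
#   get_redshift_type("NUMERIC(10, 2)")   -> Numeric(10, 2)
#   get_redshift_type("varchar(256)")     -> VarChar(256)
#   get_redshift_type("DOUBLE PRECISION") -> DoublePrecision()
# Unrecognised names raise InvalidRedshiftType.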
class RedshiftType(object):
"""An abstracttype for Redshift types.
Each type has encoder and decoder.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from the Redshift Data API to the proper
type, for easier handling.
"""
_ESCAPES = [
("\\", "\\\\"),
("'", "\\'"),
("\n", "\\n"),
("\t", "\\t"),
("\b", "\\b"),
("\f", "\\f"),
]
def _check(self, text, ubytes):
pass
def _encode_text(self, text):
if pandas.isnull(text) or pandas.isna(text):
return "NULL"
ubytes = to_utf8_bytes(str(text))
encoded_text = ubytes.decode("utf-8")
self._check(encoded_text, ubytes)
encoded_text = "\n".join(encoded_text.splitlines())
for old, new in self._ESCAPES:
encoded_text = encoded_text.replace(old, new)
return "'{}'".format(encoded_text)
def encode(self, col: Series) -> Series:
"""Encode objects stored in a column for pandas.DataFrame to
``str``-typed Redshift notations, which are used in DMLs.
First, values are casted to string. Next, character encoding is
changed to ``utf-8``, which Redshift supports as a multibyte
character set. Next, strings are checked in terms of length or
multibyte characters to avoid errors when running ``INSERT``
statements. Then, escapes are replaced. Finally, the string is quoted.
Parameters
----------
col : pandas.Series
The column storing original objects in pandas.DataFrame.
Returns
-------
encoded_col : pandas.Series
Column storing Redshift notations.
"""
encoded_col = col.fillna(numpy.nan)
encoded_col = encoded_col.map(self._encode_text)
return encoded_col
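# Illustrative behaviour of encode() above for a plain text column: each
# value is wrapped in single quotes with embedded quotes, backslashes and
# control characters escaped (e.g. "it's" -> 'it\'s'), while missing values
# are emitted as the unquoted literal NULL.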
def decode(self, col: Series) -> Series:
"""Decode response from Redshift data api to Python ojects. See
comments on each Redshift type class to confirm what type or class
is used.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing Python objects.
"""
return col
def __str__(self):
return self.__redshift_name__
class DoublePrecision(RedshiftType):
"""A type for Redshift ``DOUBLE PRECISION`` type.
This type is decoded to numpy ``float64`` type.
    The encoder for this type accepts any values that can be cast to the
    numpy ``float64`` type.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
numpy float type, for ease of dealing.
"""
__np_type__ = "float64"
__redshift_name__ = "DOUBLE PRECISION"
__min_abs__ = 2.22507385850721e-308
__max_abs__ = 1.79769313486231e308
__to_be_checked__ = True
def _check_range(self, val):
if pandas.isna(val) or val == 0.0:
return val
val_abs = abs(val)
if val_abs < self.__min_abs__ or self.__max_abs__ < val_abs:
raise TypeError(
"'{}' is out of range for type '{}'".format(val, str(self))
)
return val
def encode(self, col: Series) -> Series:
"""Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
        First, values are cast to the numpy float type. Next, value
        ranges are checked to avoid overflow or underflow errors
        when running ``INSERT`` statements. Finally, the numeric
        values are cast to str.
Parameters
----------
col : pandas.Series
The column storing original objects in pandas.DataFrame.
Returns
-------
encoded_col : pandas.Series
Column storing Redshift notations.
"""
encoded_col = col.astype(self.__np_type__)
encoded_col = encoded_col.fillna(numpy.nan)
if self.__to_be_checked__:
encoded_col.map(self._check_range)
encoded_col = encoded_col.replace([numpy.nan], ["NULL"])
encoded_col = encoded_col.astype(str)
return encoded_col
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str or float types. This decoder will map these raw values to
the proper numpy float type, for ease of dealing.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing numpy float values.
"""
return col.astype(self.__np_type__)
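# Minimal sketch of the round trip (assumed, for illustration only): ``encode``
# renders floats as plain literals with NULL for missing values, and ``decode``
# casts API strings back to float64.
# >>> DoublePrecision().encode(pandas.Series([1.5, None]))  # -> Series(['1.5', 'NULL'])
# >>> DoublePrecision().decode(pandas.Series(["1.5", None])).dtype  # -> dtype('float64')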
class Real(DoublePrecision):
"""A type for Redshift ``REAL`` type.
    This type is decoded to the numpy ``float64`` type, since decimal
    inaccuracy is observed when numpy ``float32`` is used.
    The encoder for this type accepts any values that can be cast to the
    numpy ``float64`` type and do not cause overflow or
    underflow for the Redshift ``REAL`` type.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
numpy float type, for ease of dealing.
"""
__redshift_name__ = "REAL"
__min_abs__ = 1.1755e-38
__max_abs__ = 3.40282e38
class Numeric(DoublePrecision):
"""A type for Redshift ``DECIMAL`` type.
In this library, the alias ``NUMERIC`` is used instead to avoid
conflict with Python ``decimal.Decimal`` type.
    numpy has no fixed-point type, so the decoder casts values from the
    Redshift Data API to Python ``decimal.Decimal``. Hence, the decoder
    returns an ``object``-dtype Series.
    The encoder for this type accepts any values that can be cast to the
    numpy ``float128`` type and do not cause overflow
    for the decimal with the specified precision and scale.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
``decimal.Decimal`` type, for ease of dealing.
"""
__np_type__ = "float128"
__redshift_name__ = "NUMERIC"
def __init__(self, precision: int = 18, scale: int = 0):
"""Construct the Redshift ``NUMERIC`` type.
Parameters
----------
precision :
the numeric precision for use in DDL ``CREATE TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
"""
if precision != 18 or scale != 0:
self.__redshift_name__ = "NUMERIC({},{})".format(precision, scale)
self.__max_abs__ = Decimal(str(math.pow(10.0, precision - scale)))
self.__exp_to_quantize__ = Decimal(
"1.{}".format("".join(["0" for i in range(scale)]))
)
def _encode_numeric(self, val):
if pandas.isna(val):
return "NULL"
decimal_val = Decimal(str(val)).quantize(
self.__exp_to_quantize__, rounding=ROUND_HALF_UP
)
decimal_val_abs = abs(decimal_val)
if self.__max_abs__ <= decimal_val_abs:
raise TypeError(
"'{}' is out of range for type '{}'".format(
decimal_val, str(self)
)
)
return str(decimal_val)
def encode(self, col: Series) -> Series:
"""Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
        First, values are cast to the numpy.float128 type to limit
        numeric inaccuracy. Next, the numpy.float128 values are converted to
        decimal.Decimal values, which are exact for the given
        ``precision`` and ``scale``. Then, after checking the min/max to avoid
        overflow errors, values are cast to str.
Parameters
----------
col : pandas.Series
The column storing original objects in pandas.DataFrame.
Returns
-------
encoded_col : pandas.Series
Column storing Redshift notations.
"""
encoded_col = col.astype(self.__np_type__)
encoded_col = encoded_col.fillna(numpy.nan)
encoded_col = encoded_col.map(self._encode_numeric)
return encoded_col
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str type. This decoder will cast these raw values represented
in string to ``decimal.Decimal`` objects. To store
``decimal.Decimal`` objects, the ``dtype`` for the returned
pandas.Series looks ``object``.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing ``decimal.Decimal`` values.
"""
def _to_decimal(val):
if pandas.isna(val):
return numpy.nan
return Decimal(val)
return col.map(_to_decimal)
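# Worked example (assumed, illustrative): NUMERIC(10, 1) quantizes with
# ROUND_HALF_UP, so ties round away from zero and missing values become NULL.
# >>> Numeric(10, 1).encode(pandas.Series([1.25, None]))  # -> Series(['1.3', 'NULL'])
# >>> Numeric(10, 1).decode(pandas.Series(["1.3"]))[0]    # -> Decimal('1.3')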
class Integer(DoublePrecision):
"""A type for Redshift ``INTEGER`` type.
    Values of this type are decoded to numpy ``int32`` when NULL is
    not present, and otherwise to pandas ``Int64``, the nullable
    integer type.
    The encoder for this type accepts any values that can be cast to the
    pandas ``Int64`` type and do not cause overflow
    for the Redshift ``INTEGER`` type.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
numpy int type, for ease of dealing.
"""
__np_type__ = "Int64"
__np_type_not_null__ = "int32"
__redshift_name__ = "INTEGER"
__min__ = -2147483648
__max__ = 2147483647
def _check_range(self, val):
if pandas.isna(val):
return numpy.nan
if val < self.__min__ or self.__max__ < val:
raise TypeError(
"'{}' is out of range for type '{}'".format(val, str(self))
)
return val
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str or int type. This decoder will cast these raw values to
numpy int objects.
If NULL is included, this decoder will use the nullable integer
type ``Int64``. Otherwise, numpy integer types, which are ``int16``,
``int32``, or ``int64``, are used.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing numpy int values.
"""
if len(col) != col.count():
return col.astype(self.__np_type__)
return col.astype(self.__np_type_not_null__)
class SmallInt(Integer):
"""A type for Redshift ``SMALLINT`` type.
    Values of this type are decoded to numpy ``int16`` when NULL is
    not present, and otherwise to pandas ``Int64``, the nullable
    integer type.
    The encoder for this type accepts any values that can be cast to the
    pandas ``Int64`` type and do not cause overflow
    for the Redshift ``SMALLINT`` type.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
numpy int type, for ease of dealing.
"""
__np_type_not_null__ = "int16"
__redshift_name__ = "SMALLINT"
__min__ = -32768
__max__ = 32767
class BigInt(Integer):
"""A type for Redshift ``BIGINT`` type.
    Values of this type are decoded to numpy ``int64`` when NULL is
    not present, and otherwise to pandas ``Int64``, the nullable
    integer type.
    The encoder for this type accepts any values that can be cast to the
    pandas ``Int64`` type.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
numpy int type, for ease of dealing.
"""
__np_type_not_null__ = "int64"
__redshift_name__ = "BIGINT"
__to_be_checked__ = False
class Boolean(Integer):
"""A type for Redshift ``BOOLEAN`` type.
    Values of this type are decoded to numpy ``bool`` when NULL is
    not present, and otherwise to pandas ``boolean``, the nullable
    boolean type.
    The encoder for this type accepts any values that can be cast to the
    pandas ``boolean`` type.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
numpy bool type, for ease of dealing.
"""
__np_type__ = "boolean"
__np_type_not_null__ = "bool"
__redshift_name__ = "BOOLEAN"
__to_be_checked__ = False
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str type. This decoder will cast these raw values to numpy
boolean objects.
If NULL is included, this decoder will use the nullable boolean
type ``boolean``. Otherwise, numpy boolean type ``bool`` is used.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing numpy bool values.
"""
decoded_col = col.map(
lambda x: numpy.nan if pandas.isna(x) else (x == "true")
)
return super().decode(decoded_col)
class Char(RedshiftType):
"""A type for Redshift ``CHAR`` type.
    Values of this type are decoded to Python ``str``.
    The encoder for this type accepts strings, but it rejects multibyte
    characters and strings longer than the declared length.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the Python
``str`` type, for ease of dealing.
"""
__multibyte_is_allowed__ = False
__redshift_name__ = "CHAR"
__default_length__ = 1
__max_length__ = 4096
def __init__(self, length: int = 0):
"""Construct the Redshift ``CHAR``type.
Parameters
----------
length :
Length limitation.
"""
self.length = self.__default_length__ if length == 0 else length
if self.length != self.__default_length__:
if self.length > self.__max_length__:
raise InvalidRedshiftType(
"The length '{}' is too long for '{}'".format(
self.length, self.__redshift_name__
)
)
self.__redshift_name__ = "{}({})".format(
self.__redshift_name__, length
)
def _check(self, text, ubytes):
if (not self.__multibyte_is_allowed__) and len(text) != len(ubytes):
raise TypeError("multibyte characters must not be included")
if len(ubytes) > self.length:
raise TypeError(
"'{}' exceeds length ({})".format(text, self.length)
)
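# Illustration (assumed): CHAR(3) quotes ASCII text that fits and rejects
# multibyte characters or text longer than the declared length.
# >>> Char(3).encode(pandas.Series(["abc"]))   # -> Series(["'abc'"])
# >>> Char(3).encode(pandas.Series(["abcd"]))  # raises TypeError (exceeds length)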
class BPChar(Char):
"""A type for Redshift ``BPCHAR`` type. This type is alias for
``CHAR`` type, but the specification about length is different:
the length is fixed as 256.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the Python
``str`` type, for ease of dealing.
"""
__redshift_name__ = "BPCHAR"
__default_length__ = 256
def __init__(self):
"""Construct the Redshift ``BPCHAR`` type."""
self.length = self.__default_length__
class VarChar(Char):
"""A type for Redshift ``VARCHAR`` type.
    Values of this type are decoded to Python ``str``.
    The encoder for this type accepts strings. Unlike the ``CHAR`` type,
    this type accepts multibyte characters.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the Python
``str`` type, for ease of dealing.
"""
__multibyte_is_allowed__ = True
__redshift_name__ = "VARCHAR"
__default_length__ = 256
__max_length__ = 65535
class Text(VarChar):
"""A type for Redshift ``TEXT`` type. This type is alias for
``VARCHAR`` type, but the specification about length is different:
the length is fixed as 256.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the Python
``str`` type, for ease of dealing.
"""
__redshift_name__ = "TEXT"
def __init__(self):
"""Construct the Redshift ``TEXT`` type."""
self.length = self.__default_length__
class TimeStamp(RedshiftType):
"""A type for Redshift ``TIMESTAMP`` type.
    Values of this type are decoded with ``pandas.to_datetime``.
The encoder for this type accepts any values which can be
converted to datetime objects with ``pandas.to_datetime``.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
datetime.datetime type, for ease of dealing.
"""
__redshift_name__ = "TIMESTAMP"
__dt_format__ = "%Y-%m-%d %H:%M:%S"
__utc__ = False
def encode(self, col: Series) -> Series:
"""Encode objects stored in a column for pandas.DataFrame to
``str``-typed notations for Redshift DMLs.
First, values are converted to datetime objects with
``pandas.to_datetime``.
Next, values are converted to text with ``strftime`` and format.
In the format, quote is included.
Parameters
----------
col : pandas.Series
The column storing original objects in pandas.DataFrame.
Returns
-------
encoded_col : pandas.Series
Column storing Redshift notations.
"""
def _strftime(obj):
if pandas.isnull(obj) or pandas.isna(obj):
return numpy.nan
if hasattr(obj, "strftime"):
return obj.strftime(self.__dt_format__)
return obj
        # Deal with objects that ``pandas.to_datetime`` cannot convert
        # directly, such as datetime.time, by pre-formatting them.
encoded_col = col.map(_strftime)
try:
encoded_col = pandas.to_datetime(encoded_col, utc=self.__utc__)
except ParserError as err:
raise TypeError("cannot parse to datetime {}".format(str(err)))
output_format = "'{}'".format(self.__dt_format__)
encoded_col = encoded_col.dt.strftime(output_format)
encoded_col = encoded_col.fillna("NULL")
return encoded_col
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str type. This decoder will convert raw values to datetime
objects with ``pandas.to_datetime``.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing datetime.datetime values.
"""
return pandas.to_datetime(col, errors="coerce", utc=self.__utc__)
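# Illustration (assumed): anything ``pandas.to_datetime`` understands is
# rendered as a quoted timestamp literal; missing values become NULL.
# >>> TimeStamp().encode(pandas.Series(["2021-01-02 03:04:05", None]))
# -> Series(["'2021-01-02 03:04:05'", "NULL"])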
class TimeStampTz(TimeStamp):
"""A type for Redshift ``TIMESTAMPTZ`` type.
This type values are decoded with ``pandas.to_datetime`` and
option ``utc``.
The encoder for this type accepts any values which can be
converted to datetime objects with ``pandas.to_datetime``.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
datetime.datetime type, for ease of dealing.
"""
__redshift_name__ = "TIMESTAMPTZ"
__dt_format__ = "%Y-%m-%d %H:%M:%S%z"
__utc__ = True
class Date(TimeStamp):
"""A type for Redshift ``DATE`` type.
    Values of this type are decoded by converting to datetime objects
    with ``pandas.to_datetime`` and then converting to date
    objects with ``datetime.date()``.
The encoder for this type accepts any values which can be
converted to datetime objects with ``pandas.to_datetime``.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
datetime.date type, for ease of dealing.
"""
__redshift_name__ = "DATE"
__dt_format__ = "%Y-%m-%d"
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str type. First, this decoder converts raw values to datetime
objects with ``pandas.to_datetime``. Next, datetime objects are
converted to date objects with ``datetime.date()``.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing datetime.date values.
"""
col = super().decode(col)
return col.map(lambda dt: dt.date() if not pandas.isna(dt) else dt)
class Time(TimeStamp):
"""A type for Redshift ``TIME`` type.
    Values of this type are decoded by converting to datetime objects
    with ``pandas.to_datetime`` and then converting to time
    objects with ``datetime.time()``.
The encoder for this type accepts any values which can be
converted to datetime objects with ``pandas.to_datetime``.
Methods
-------
encode :
Encode objects stored in a column for pandas.DataFrame
to ``str``-typed notations for Redshift DMLs.
decode :
Map raw values from Redshift Data API to the proper
datetime.time type, for ease of dealing.
"""
__redshift_name__ = "TIME"
__dt_format__ = "%H:%M:%S"
def decode(self, col: Series) -> Series:
"""Raw values in response from Redshift data api are represented
in str type. This decoder will convert raw values to datetime
objects with ``pandas.to_datetime``. Next, datetime objects are
converted to time objects with ``datetime.time()``.
Parameters
----------
col :
Column storing raw values in response from Redshift Data API.
Returns
-------
col :
Column storing datetime.time values.
"""
col = super().decode(col)
        return col.map(lambda dt: dt.time() if not pandas.isna(dt) else dt)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 16:02:22 2018
@author: joyce
"""
import pandas as pd
import numpy as np
import pymysql
from sklearn import linear_model
import time
from functools import wraps
config = {
'host': 'magiquant.mysql.rds.aliyuncs.com',
'port': 3306,
'user':'haoamc',
'passwd':'<PASSWORD>',
'db': 'quant'
}
def timer(function):
@wraps(function)
def function_timer(*args, **kwargs):
t0 = time.time()
result = function(*args, **kwargs)
t1 = time.time()
print ("Total time running %s: %s seconds" %(function.__name__, str(round((t1-t0), 2))))
return result
return function_timer
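# Illustrative usage (assumed, not part of the original script): decorating a
# function with @timer prints its wall-clock runtime after every call, e.g.
#
#   @timer
#   def demo(n):
#       return sum(range(n))
#
#   demo(10**6)  # prints "Total time running demo: 0.05 seconds"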
@timer
def get_stockdata_from_sql(mode,begin,end,name):
"""
get stock market data from sql,include: [Open,High,Low,Close,Pctchg,Vol,
Amount,total_shares,free_float_shares,Vwap]
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
if mode == 0:
query = "SELECT stock_id,%s FROM stock_market_data WHERE trade_date='%s';"%(name,begin)
else:
query = "SELECT trade_date,stock_id,%s FROM stock_market_data WHERE trade_date >='%s' \
AND trade_date <= '%s';"%(name,begin,end)
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
        if mode == 0:
            date.columns = ['ID', name]
            date = date.set_index('ID')
        else:
            date.columns = ['date', 'ID', name]
            date = date.set_index('ID')
            date = date.set_index([date['date'], date.index], drop=True)
            del date['date']
        return date
finally:
if conn:
conn.close()
@timer
def get_indexdata_from_sql(mode,begin,end,name,index):
"""
get stock market data from sql,include: [open,high,low,close,pctchg]
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
if mode == 0:
query = "SELECT stock_id,%s FROM index_market_data WHERE trade_date='%s' AND stock_id ='%s';"%(name,begin,index)
else:
query = "SELECT trade_date,stock_id,%s FROM index_market_data WHERE trade_date >='%s' \
AND trade_date <= '%s' AND stock_id ='%s';"%(name,begin,end,index)
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
        if mode == 0:
            date.columns = ['ID', name]
            date = date.set_index('ID')
        else:
            date.columns = ['date', 'ID', name]
            date = date.set_index('ID')
            date = date.set_index([date['date'], date.index], drop=True)
            del date['date']
        return date
finally:
if conn:
conn.close()
@timer
def get_tradedate(begin, end):
"""
get tradedate between begin date and end date
Params:
begin:
str,eg: '1999-01-01'
end:
str,eg: '2017-12-31'
Return:
pd.DataFrame
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_trade_day= 1 AND \
calendar_date>='" + begin + "' AND calendar_date<='" + end + "';"
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
return date
finally:
if conn:
conn.close()
def get_fama(begin,end,name,index):
"""
get fama factor from sql
Params:
begin:
str,eg:"1990-01-01"
end:
str:eg:"2017-12-31"
index:
str, index id ,eg :'000300.SH'
name:
the name of fama factors ['SMB','HML','MKT']
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT trade_date,%s FROM fama_factor WHERE \
stock_id = '%s' AND trade_date >= '%s' AND trade_date <= '%s';"\
%(name,index,begin,end)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
data.columns = ['date',name]
return data
finally:
if conn:
conn.close()
@timer
def Corr(df,num):
"""
Params:
data:
pd.DataFrame,multi-index = ['date','id']
num:
int
Return:
pd.DataFrame,multi-index = ['date','id']
"""
df.columns = ['r1','r2']
df1 = df['r1']
df2 = df['r2']
df1_unstack = df1.unstack()
df2_unstack = df2.unstack()
corr = df1_unstack.rolling(num).corr(df2_unstack)
corr = corr.stack()
corr = pd.DataFrame(corr)
corr.columns = ['corr']
return corr
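# Illustrative input (assumed; 'ret_a'/'ret_b' are hypothetical return series on
# a ['date','ID'] MultiIndex): Corr unstacks each column into a date-by-stock
# panel, takes the num-day rolling correlation column by column, and stacks the
# result back onto the same MultiIndex.
#
#   df = pd.concat([ret_a, ret_b], axis=1)  # two columns, renamed to ['r1','r2']
#   rolling_corr = Corr(df, 20)             # one 'corr' column per ('date','ID')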
@timer
def Cov(df,num):
"""
Params:
data:
pd.DataFrame,multi-index = ['date','id']
num:
int
Return:
pd.DataFrame,multi-index = ['date','id']
"""
df.columns = ['r1','r2']
df1 = df['r1']
df2 = df['r2']
df1_unstack = df1.unstack()
df2_unstack = df2.unstack()
corr = df1_unstack.rolling(num).cov(df2_unstack)
corr = corr.stack()
corr = pd.DataFrame(corr)
corr.columns = ['cov']
return corr
@timer
def Delta(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
num:
int
Return:
pd.DataFrame,multi-inde = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.shift(num)
df_temp1 = df_unstack - df_temp
df_final = df_temp1.stack()
return df_final
@timer
def Delay(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
num:
int
Return:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.shift(num)
df_final = df_temp.stack()
return df_final
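# Worked example (assumed): with df on a ['date','ID'] MultiIndex, Delay(df, 1)
# shifts each stock's series down by one trading day (the earliest date becomes
# NaN), and Delta(df, 1) is the one-day difference df - Delay(df, 1).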
@timer
def Rank(df):
"""
Params:
df: pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
Return:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
"""
df = df.swaplevel(0,1)
df_mod = df.unstack()
df_rank = df_mod.rank(axis = 1)
df_final_temp = df_rank.stack()
    # Swap the inner and outer index levels back
df_final = df_final_temp.swaplevel(0,1)
return df_final
@timer
def Cross_max(df1,df2):
"""
Params:
df1:
pd.DataFrame,multi-index = ['date','ID']
df2:
pd.DataFrame,multi-index = ['date','ID']
"""
df = pd.concat([df1,df2],axis =1 ,join = 'inner')
df_max = np.max(df,axis = 1)
return df_max
@timer
def Cross_min(df1,df2):
"""
Params:
df1:
pd.DataFrame,multi-index = ['date','ID']
df2:
pd.DataFrame,multi-index = ['date','ID']
"""
df = pd.concat([df1,df2],axis =1 ,join = 'inner')
df_min = np.min(df,axis = 1)
return df_min
@timer
def Sum(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack(level = 'ID')
df_temp = df_unstack.rolling(num).sum()
df_final = df_temp.stack()
return df_final
@timer
def Mean(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).mean()
df_final = df_temp.stack()
return df_final
@timer
def STD(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).std()
df_final = df_temp.stack()
return df_final
@timer
def TsRank(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df = df.swaplevel(0,1)
df_unstack = df.unstack()
date = df_unstack.index.tolist()
ts_rank = pd.DataFrame([])
for i in range(num,len(date)):
df = df_unstack.iloc[i-num:i,:]
df_rank = df.rank(axis = 0)
ts_rank_temp = pd.DataFrame(df_rank.iloc[num-1,:]).T
ts_rank = pd.concat([ts_rank,ts_rank_temp],axis = 0)
ts_rank = ts_rank.stack()
ts_rank = ts_rank.swaplevel(0,1)
return ts_rank
@timer
def TsMax(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).max()
df_final = df_temp.stack()
return df_final
@timer
def TsMin(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).min()
df_final = df_temp.stack()
return df_final
@timer
def DecayLinear(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df2 = df.swaplevel(0,1)
df_unstack2 = df2.unstack()
secID = df_unstack2.index.tolist()
array = np.arange(num,0,-1)
w = pd.DataFrame(array/array.sum())
date = df_unstack.index.tolist()
    df_wma = pd.DataFrame([])
# Libraries
import Levenshtein as lv
from difflib import SequenceMatcher
import string
import random
import pandas as pd
import numpy as np
import rdkit
from rdkit import Chem
from torch import matmul, rand, sspaddmm
import pandas as pd
import numpy as np
import seaborn as sns
from multiprocessing import Pool
from tabulate import tabulate
import glob
import h5py
import os
class AugmentClass():
def __init__(self, filename):
"""
Class responsible for augmenting a dataset using uniform
randomization or levenshtein distance based randomization.
:param filename: path to file
:type filename: string
"""
# Initialize field attributes
self.filename = filename
self.data = pd.read_csv(filename)
# HDF5 file categoricals
self.counter = 0
self.group = "obs_"
self.dataset = None
self.string_dt = h5py.special_dtype(vlen=str)
# Pre-define variables
# Check self.data upper range: limit to small upper bound for testing.
self.data = self.data.iloc[0:, :]
self.products_pools = None
self.reactant_pools = None
self.top_idx = None
self.f = None
self.collection = []
self.reactant_series = pd.Series([])
self.product_series = pd.Series([])
self.pandas = {'source': [], 'target': []}
self.df = pd.DataFrame({'source': [], 'target': []})
self.largest = 0
def __pairwise(self, src, tgt, n_perms, top_n, randomize_src=True):
"""
Computes Levenshtein similarity between a source SMILES
string and a target SMILES string.
:param src: SMILES string source
:type src: string
:param tgt: SMILES string target
:type tgt: string
:param n_perms: number of permutations to compute
:type n_perms: int
:param top_n: return the top-n similarities
:type top_n: int
:return: [top matching reactant, top matching target]
:rtype: [string, string]
"""
# Compute the similarity of a source string to target using levenshtein distance
scores, source, target = self.__levenshtein_rand(
src, tgt, n_perms=n_perms, top_n=top_n, randomize_src=randomize_src)
return source, target, scores
def __get_components(self, values):
"""
Function to split a pandas DataFrame of reaction SMILES strings
into reactant component(s) and product component.
:param values: DataFrame values where columns == ['source', 'target']
:type values: numpy array
:return: [reactantA, reatantB, product] or [reactant, product] is reaction set is singular
:rtype: tuple
"""
collections = []
# Append Products
source = values[0]
target = values[1]
collections.append(target)
if '.' in source:
splits = source.split('.')
for split in splits:
collections.append(split)
else:
collections.append(source)
source_values = collections[1:]
target_values = collections[0]
# Clear cache
collections = []
return [target_values], source_values if len(source_values) > 1 else [source_values[0]]
def __uniform_rand(self, SMILES, n_perms):
"""
Randomizes canonical SMILES [n_perms] times
:param SMILES: SMILES string
:type SMILES: string
:param n_perms: number of times to permute
:type n_perms: int
:return: list of randomized src SMILES
:rtype: list
"""
        # Generate a molecule from the SMILES string
mol = Chem.MolFromSmiles(SMILES)
# Extract atom numbers
ans = list(range(mol.GetNumAtoms()))
randomizations = []
while len(randomizations) < n_perms:
# Permute atom ordering
np.random.shuffle(ans)
# Renumber atoms
rmol = Chem.RenumberAtoms(mol, ans)
# Convert random mol back to SMILES
rSMILES = Chem.MolToSmiles(rmol, canonical=False)
randomizations.append(rSMILES)
return randomizations
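    # Illustration (assumed): renumbering atoms yields alternative but
    # chemically identical SMILES, e.g. "c1ccccc1O" may come back as
    # "Oc1ccccc1"; n_perms such variants are collected per call.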
def __levenshtein_rand(self, src, tgt, n_perms, top_n, randomize_src=True):
"""
        This function takes in a source SMILES [src] and a target SMILES [tgt] and randomizes each of them [n_perms] times;
        if randomize_src == False, only [tgt] is randomized.
        A randomly selected [src]-variant (or [src] itself if randomize_src == False) is then compared against every [tgt]-variant.
        Only the top-n similar SMILES are retained, along with their similarity ratio, e.g. [ratio, src-variant, tgt-variant].
:param src: source SMILES (singular)
:type src: string
:param tgt: target SMILES string (list)
        :type tgt: list of SMILES strings
        :param n_perms: number of times to permute the src SMILES
:type n_perms: int
:param top_n: return the best n src-target matches
:type top_n: int
:param randomize_src: whether to randomize source SMILES string or not, defaults to True
:type randomize_src: bool, optional
:return: best_n list of [similarity _ratio, src, tgt]
:rtype: [float, string, string]
"""
# Generate n-unique permutations of the src SMILE
lv_list = []
# Generate random permutations of src or use original
if randomize_src:
r_SMILE_set = self.__uniform_rand(src, n_perms)
r = np.random.randint(0, len(r_SMILE_set))
smi = r_SMILE_set[r]
else:
smi = src
# Randomize tgt
p_SMILE_set = self.__uniform_rand(tgt, n_perms)
# Put original sequence in permutation list
# if include_self:
# r_SMILE_set.append(src)
for i in range(0, len(p_SMILE_set)):
ratio = lv.ratio(smi, p_SMILE_set[i])
lv_list.append([ratio, smi, p_SMILE_set[i]])
# Sort similarity scores and return top-n
ranks = sorted(lv_list, key=lambda x: x[0], reverse=True)
best_perms = ranks[:top_n][0]
return best_perms
def __define_h5_file_groups(self, save_path, chunk_id):
"""
Defines h5 groups to save observations to for data augmentation
:param save_path: path to save h5 to
:type save_path: str
:param chunk_id: chunk counter
:type chunk_id: int
"""
# Pre_define all groups in H5 file
self.f = h5py.File(save_path, 'w')
for idx, row in enumerate(range(np.shape(self.data)[0])):
self.f.create_group(self.group + chunk_id + '_' + str(idx))
def __get_levenshtein_SMILES(self, chunk_id, save_path, n_perms, mode='pandas'):
"""
Data augmentation using Levenshtein rand
:param chunk_id: don't set manually, partition counter
:type chunk_id: int
:param save_path: path to save augmentated files to
:type save_path: str
        :param n_perms: number of randomizations
:type n_perms: int
:param mode: file type, ["pandas", "h5"], defaults to 'pandas'
:type mode: str, optional
        :return: either a pandas dataframe or an h5 file containing the augmented data (original canonical forms not included)
:rtype: [pandas.DataFrame, hdf5]
"""
collection = {'source': [], 'target': []}
products = None
reactants = None
# Enumerate over all values in the DataFrame
for i, row in enumerate(self.data.values):
collection = {'source': [], 'target': []}
prod, reacts = self.__get_components(row)
reacts.sort()
# Make n_permutation samples for each component
for n in range(0, n_perms):
if len(reacts) > 1:
# If there are multiple reactants, sample pool "recurssively" e.g. (RB:P, P:RA )
# Round 1
ra, p, _ = self.__pairwise(reacts[0], prod[0],
top_n=1, n_perms=1000)
# Round 2
p, rb, _ = self.__pairwise(
p, reacts[1], top_n=1, n_perms=1000, randomize_src=False)
# Update variable names
reactants = ".".join([ra, rb])
products = p
elif len(reacts) == 1:
# Single round
ra, p, _ = self.__pairwise(reacts[0], prod[0],
top_n=1, n_perms=1000)
# Update variabble names
reactants = ra
products = p
# Save to dictionary (suitable for extracting & exporting to H5 or pandas)
if mode == 'h5':
collection['source'].append(reactants)
collection['target'].append(products)
elif mode == 'pandas':
self.pandas['source'].append(reactants)
self.pandas['target'].append(products)
if mode == 'h5':
# Write to H5 file
for i, (k, v) in enumerate(collection.items()):
if k == 'target':
                    v = pd.Series(v)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 12:00:37 2020
@author: tianyu
"""
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
from sklearn.preprocessing import Normalizer
import math
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as Data
from sklearn.metrics.pairwise import euclidean_distances
import os
from sklearn import preprocessing
from sklearn import linear_model
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
#path = '/Users/tianyu/Google Drive/fasttext/gcn/pygcn-master/data/cora/'
#dataset = 'cora'
def high_var_dfdata_gene(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar) #small --> big
if gene is None and ind is False:
return data.iloc[ind_maxvar[:num]]
if ind:
return data.iloc[ind_maxvar[:num]], ind_maxvar[:num]
ind_gene = data.index.values[ind_maxvar[:num]]
return data.iloc[ind_maxvar[:num]],gene.loc[ind_gene]
def high_var_dfdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data.iloc[ind_maxvar[:num]]
if ind:
return data.iloc[gene_ind], gene_ind
return data.iloc[gene_ind],gene.iloc[gene_ind]
def high_var_npdata(data, num, gene = None, ind=False): #data: gene*cell
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# geneind2 = np.random.choice(ind_maxvar[num//2:], size = num//2, replace = False)
# gene_ind = np.concatenate((gene_ind, geneind2))
#np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def high_tfIdf_npdata(data,tfIdf, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(tfIdf, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def high_expr_dfdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.sum(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data.iloc[gene_ind]
if ind:
return data.iloc[gene_ind], gene_ind
return data.iloc[gene_ind],gene.iloc[gene_ind]
def high_expr_npdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.sum(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def get_rank_gene(OutputDir, dataset):
gene = pd.read_csv(OutputDir+dataset+'/rank_genes_dropouts_'+dataset+'.csv')
return gene
def rank_gene_dropouts(data, OutputDir, dataset):
# data: n_cell * n_gene
genes = np.zeros([np.shape(data)[1],1], dtype = '>U10')
train = pd.DataFrame(data)
train.columns = np.arange(len(train.columns))
# rank genes training set
dropout = (train == 0).sum(axis='rows') # n_gene * 1
dropout = (dropout / train.shape[0]) * 100
mean = train.mean(axis='rows') # n_gene * 1
notzero = np.where((np.array(mean) > 0) & (np.array(dropout) > 0))[0]
zero = np.where(~((np.array(mean) > 0) & (np.array(dropout) > 0)))[0]
train_notzero = train.iloc[:,notzero]
train_zero = train.iloc[:,zero]
zero_genes = train_zero.columns
dropout = dropout.iloc[notzero]
mean = mean.iloc[notzero]
dropout = np.log2(np.array(dropout)).reshape(-1,1)
mean = np.array(mean).reshape(-1,1)
reg = linear_model.LinearRegression()
reg.fit(mean,dropout)
residuals = dropout - reg.predict(mean)
residuals = pd.Series(np.array(residuals).ravel(),index=train_notzero.columns) # n_gene * 1
residuals = residuals.sort_values(ascending=False)
sorted_genes = residuals.index
sorted_genes = sorted_genes.append(zero_genes)
genes[:,0] = sorted_genes.values
genes = pd.DataFrame(genes)
genes.to_csv(OutputDir + dataset + "/rank_genes_dropouts_" + dataset + ".csv", index = False)
def data_noise(data): # data is samples*genes
for i in range(data.shape[0]):
#drop_index = np.random.choice(train_data.shape[1], 500, replace=False)
#train_data[i, drop_index] = 0
target_dims = data.shape[1]
noise = np.random.rand(target_dims)/10.0
data[i] = data[i] + noise
return data
def norm_max(data):
data = np.asarray(data)
max_data = np.max([np.absolute(np.min(data)), np.max(data)])
data = data/max_data
return data
def findDuplicated(df):
df = df.T
idx = df.index.str.upper()
filter1 = idx.duplicated(keep = 'first')
print('duplicated rows:',np.where(filter1 == True)[0])
indd = np.where(filter1 == False)[0]
df = df.iloc[indd]
return df.T
# In[]:
def load_labels(path, dataset):
labels = pd.read_csv(os.path.join(path + dataset) +'/Labels.csv',index_col = None)
labels.columns = ['V1']
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return labels
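# Illustrative usage (assumed path): Labels.csv holds one string label per cell,
# and the integer mapping is rebuilt alphabetically on every call.
#   labels = load_labels('/path/to/data/', 'usoskin')  # -> array([0, 3, 1, ...])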
def load_usoskin(path = '/Users/tianyu/google drive/fasttext/imputation/', dataset='usoskin', net='String'):
# path = os.path.join('/Users',user,'google drive/fasttext/imputation')
data = pd.read_csv(os.path.join(path, dataset, 'data_13776.csv'), index_col = 0)
# adj = sp.load_npz(os.path.join(path, dataset, 'adj13776.npz'))
print(data.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(13776)+'.npz')
print(adj.shape)
labels = pd.read_csv(path +'/' +dataset +'/data_labels.csv',index_col = 0)
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return adj, np.asarray(data), labels
def load_kolod(path = '/Users/tianyu/google drive/fasttext/imputation/', dataset='kolod', net='pcc'):
# path = os.path.join('/Users',user,'google drive/fasttext/imputation')
data = pd.read_csv(os.path.join(path, dataset, 'kolod.csv'), index_col = 0)
# adj = sp.load_npz(os.path.join(path, dataset, 'adj13776.npz'))
print(data.shape)
adj = np.corrcoef(np.asarray(data))
#adj[np.where(adj < 0.3)] = 0
    labels = pd.read_csv(path +'/' +dataset +'/kolod_labels.csv',index_col = 0)
"""
Description:
-----------
This script hosts many helper functions to make notebooks cleaner. The hope is to not distract users with ugly code.
"""
import numpy as np
import pandas as pd
import matplotlib.patheffects as path_effects
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors  # needed for colors.PowerNorm used in boxbin
#outlines for text
pe1 = [path_effects.withStroke(linewidth=2,
foreground="k")]
pe2 = [path_effects.withStroke(linewidth=2,
foreground="w")]
def show_vals(da,ax):
vals = da.values
x = np.arange(0,vals.shape[0])
y = np.arange(0,vals.shape[1])
X,Y = np.meshgrid(x,y)
X = np.ravel(X)
Y = np.ravel(Y)
V = np.ravel(vals)
for i in np.arange(0,len(X)):
fillstr = np.asarray(np.round(V[i],2),dtype=str)
fillstr = np.char.ljust(fillstr,4,'0')
if np.round(V[i],2) > 0.5:
ax.text(X[i]-0.2,Y[i],fillstr,color='k')
else:
ax.text(X[i]-0.2,Y[i],fillstr,color='w')
return
def draw_zoom_window(ax,a,b):
ax.plot([a,a,a+10,a+10,a],[b,b+10,b+10,b,b],'-k',lw=3)
ax.plot([a,a,a+10,a+10,a],[b,b+10,b+10,b,b],'-',color='dodgerblue',lw=2)
return a,b
def get_right_units_vil(vil):
"""they scaled VIL weird, so this unscales it"""
tmp = np.zeros(vil.shape)
idx = np.where(vil <=5)
tmp[idx] = 0
idx = np.where((vil>5)*(vil <= 18))
tmp[idx] = (vil[idx] -2)/90.66
idx = np.where(vil>18)
tmp[idx] = np.exp((vil[idx] - 83.9)/38.9)
return tmp
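# Worked example (assumed): the piecewise unscaling maps raw pixel values back
# to VIL roughly as
#   vil = 3   -> 0.0
#   vil = 10  -> (10 - 2) / 90.66           ~= 0.088
#   vil = 100 -> exp((100 - 83.9) / 38.9)   ~= 1.51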
def plot_feature_loc(da,ax,q = [0,1,10,25,50,75,90,99,100]):
""" This will plot representative pixels matching the quantiles given """
vals = np.nanpercentile(da,q)
xs = []
ys = []
for v in vals:
local_idx = np.where(np.round(da.values,1) == np.round(v,1))
if len(local_idx[0]) > 1:
ii = np.random.choice(np.arange(0,len(local_idx[0])),size=1)
xs.append(local_idx[0][ii[0]])
ys.append(local_idx[1][ii[0]])
else:
ii = 0
xs.append(local_idx[0][ii])
ys.append(local_idx[1][ii])
markerlist = ['min','$01$','$10$','$25$','$50$','$75$','$90$','$99$','max']
zlist = list(zip(xs,ys))
for i,(x,y) in enumerate(zlist):
ax.text(y,x,markerlist[i],path_effects=pe2)
return
def adjust_keys(df,keyadd,dask=False,dropevent=False):
if dask:
keys = df.columns
newkeys = []
newkeys.append('dtime')
newkeys = newkeys + list(keys[1:-1]+keyadd)
newkeys.append(keys[-1])
else:
keys = df.keys()
newkeys = list(keys[:-1]+keyadd)
newkeys.append(keys[-1])
df.columns = newkeys
if dropevent:
df = df.drop(columns='event')
if dask:
df['dtime'] = df['dtime'].astype(np.datetime64)
return df
def clear_nan(X,y):
tmp = np.hstack([X,y.reshape([y.shape[0],1])])
df_tmp = pd.DataFrame(tmp)
df_tmp = df_tmp.dropna(how='any')
tmp = df_tmp.to_numpy()
X = tmp[:,:-1]
y = tmp[:,-1:]
y = np.asarray(y.squeeze(),dtype=int)
return X,y
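# Illustration (assumed): rows with any NaN in either X or y are dropped
# together so the two arrays stay aligned.
#   X = np.array([[1., 2.], [np.nan, 3.]]); y = np.array([0, 1])
#   clear_nan(X, y)  # -> (array([[1., 2.]]), array([0]))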
def load_n_combine_df(path_to_data='../datasets/sevir/',features_to_keep=np.arange(0,36,1),class_labels=True,dropzeros=False):
df_ir = pd.read_csv(path_to_data + 'IR_stats_master.csv',index_col=0,low_memory=False,parse_dates=True)
df_wv = pd.read_csv(path_to_data + 'WV_stats_master.csv',index_col=0,low_memory=False,parse_dates=True)
df_vis = pd.read_csv(path_to_data + 'VIS_stats_master.csv',index_col=0,low_memory=False,parse_dates=True)
df_vil = pd.read_csv(path_to_data + 'VIL_stats_master.csv',index_col=0,low_memory=False,parse_dates=True)
df_li = pd.read_csv(path_to_data + 'LI_stats_master.csv',index_col=0,low_memory=False,parse_dates=True)
#get rid of that outlier
df_wv = df_wv.where(df_wv.q000 > -10000)
if dropzeros:
df_li = df_li.where(df_li.c >= 1)
#get rid of NaNs
idx_keep = np.where(~df_vis.isna().all(axis=1).values)[0]
df_ir = df_ir.iloc[idx_keep]
df_wv = df_wv.iloc[idx_keep]
df_vis = df_vis.iloc[idx_keep]
df_vil = df_vil.iloc[idx_keep]
df_li = df_li.iloc[idx_keep]
#make sure idx are in order
df_ir = df_ir.sort_index()
df_wv = df_wv.sort_index()
df_vis = df_vis.sort_index()
df_vil = df_vil.sort_index()
df_li = df_li.sort_index()
#adjust keys so merging doesnt make keys confusing
df_ir = adjust_keys(df_ir,'_ir')
df_wv = adjust_keys(df_wv,'_wv')
df_vis = adjust_keys(df_vis,'_vi')
df_vil = adjust_keys(df_vil,'_vl')
df_li = adjust_keys(df_li,'_li')
#drop event column
df_ir= df_ir.drop(columns='event')
df_wv= df_wv.drop(columns='event')
df_vis= df_vis.drop(columns='event')
df_vil= df_vil.drop(columns='event')
df_li = df_li.drop(columns='event')
#slice on time
train_slice = slice('2017-01-01','2019-06-01')
other_slice = slice('2019-06-01','2019-12-31')
df_ir_tr = df_ir[train_slice]
df_ir_ot = df_ir[other_slice]
df_wv_tr = df_wv[train_slice]
df_wv_ot = df_wv[other_slice]
df_vis_tr = df_vis[train_slice]
df_vis_ot = df_vis[other_slice]
df_vil_tr = df_vil[train_slice]
df_vil_ot = df_vil[other_slice]
df_li_tr = df_li[train_slice]
df_li_ot = df_li[other_slice]
#throw every other week into each the val and test
va = np.arange(22,52,2)
te = np.arange(23,53,2)
dtime = pd.to_datetime(df_ir_ot.index)
idx_v = np.array([],dtype=int)
for v in va:
tmp = np.where(dtime.isocalendar().week == v)[0]
if len(tmp) == 0:
continue
else:
idx_v = np.append(idx_v,tmp)
idx_t = np.array([],dtype=int)
for t in te:
tmp = np.where(dtime.isocalendar().week == t)[0]
if len(tmp) == 0:
continue
else:
idx_t = np.append(idx_t,tmp)
df_ir_va = df_ir_ot.iloc[idx_v]
df_ir_te = df_ir_ot.iloc[idx_t]
df_wv_va = df_wv_ot.iloc[idx_v]
df_wv_te = df_wv_ot.iloc[idx_t]
df_vis_va = df_vis_ot.iloc[idx_v]
df_vis_te = df_vis_ot.iloc[idx_t]
df_vil_va = df_vil_ot.iloc[idx_v]
df_vil_te = df_vil_ot.iloc[idx_t]
df_li_va = df_li_ot.iloc[idx_v]
df_li_te = df_li_ot.iloc[idx_t]
X_train = np.hstack([df_ir_tr.to_numpy()*1e-2,df_wv_tr.to_numpy()*1e-2,df_vis_tr.to_numpy()*1e-4,get_right_units_vil(df_vil_tr.to_numpy())])
X_validate = np.hstack([df_ir_va.to_numpy()*1e-2,df_wv_va.to_numpy()*1e-2,df_vis_va.to_numpy()*1e-4,get_right_units_vil(df_vil_va.to_numpy())])
X_test= np.hstack([df_ir_te.to_numpy()*1e-2,df_wv_te.to_numpy()*1e-2,df_vis_te.to_numpy()*1e-4,get_right_units_vil(df_vil_te.to_numpy())])
#choose
X_train = X_train[:,features_to_keep]
X_validate = X_validate[:,features_to_keep]
X_test = X_test[:,features_to_keep]
#make class labels
if class_labels:
y_train = np.zeros(X_train.shape[0],dtype=int)
y_train[np.where(df_li_tr.c_li.values >= 1)] = 1
y_validate = np.zeros(X_validate.shape[0],dtype=int)
y_validate[np.where(df_li_va.c_li.values >= 1)] = 1
y_test = np.zeros(X_test.shape[0],dtype=int)
y_test[np.where(df_li_te.c_li.values >= 1)] = 1
else:
y_train = df_li_tr.c_li.values
y_validate = df_li_va.c_li.values
y_test = df_li_te.c_li.values
#clean out nans
X_train,y_train = clear_nan(X_train,y_train)
X_validate,y_validate = clear_nan(X_validate,y_validate)
X_test,y_test = clear_nan(X_test,y_test)
return (X_train,y_train),(X_validate,y_validate),(X_test,y_test)
def percentile(n):
def percentile_(x):
return np.percentile(x, n)
percentile_.__name__ = 'percentile_%s' % n
return percentile_
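# Illustrative usage (assumed column/group names): the named closure plugs into
# pandas aggregations, labelling the output columns 'percentile_25', etc.
#   df.groupby('event').agg({'vil': [np.mean, percentile(25), percentile(75)]})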
#determine unconditional mean, sum R in each bin. But then devide by master counts
def boxbin(x,y,xedge,yedge,c=None,figsize=(5,5),cmap='viridis',mincnt=10,vmin=None,vmax=None,edgecolor=None,powernorm=False,
ax=None,normed=False,method='mean',quantile=None,alpha=1.0,cbar=True,unconditional=False,master_count=np.array([])):
""" This function will grid data for you and provide the counts if no variable c is given, or the median if
    a variable c is given. In the future I will add functionality to do the median, and possibly quantiles.
x: 1-D array
y: 1-D array
xedge: 1-D array for xbins
yedge: 1-D array for ybins
c: 1-D array, same len as x and y
returns
axis handle
cbar handle
C matrix (counts or median values in bin)
"""
midpoints = np.empty(xedge.shape[0]-1)
for i in np.arange(1,xedge.shape[0]):
midpoints[i-1] = xedge[i-1] + (np.abs(xedge[i] - xedge[i-1]))/2.
#note on digitize. bin 0 is outside to the left of the bins, bin -1 is outside to the right
ind1 = np.digitize(x,bins = xedge) #inds of x in each bin
ind2 = np.digitize(y,bins = yedge) #inds of y in each bin
#drop points outside range
outsideleft = np.where(ind1 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind1 != len(xedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
outsideleft = np.where(ind2 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind2 != len(yedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
if c is None:
c = np.zeros(len(ind1))
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
df2 = df.groupby(["x","y"]).count()
df = df2.where(df2.values >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if normed:
n_samples = np.ma.sum(C)
C = C/n_samples
C = C*100
print('n_samples= {}'.format(n_samples))
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,edgecolor=edgecolor,norm=colors.PowerNorm(gamma=0.5),vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,edgecolor=edgecolor,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
elif unconditional:
        df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
import torch
import pandas as pd
from transformers import BertTokenizer
import numpy as np
from common import get_parser
parser = get_parser()
args = parser.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
def format_label(label):
label = (str(label))
label = label[1:len(label)-2]
for char in label:
label = label.replace(".","")
return list(map(int, label.split(" ")))
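# Worked example (assumed): a stringified one-hot vector such as "[1. 0. 0.]"
# is sliced to "1. 0. 0", stripped of dots to "1 0 0", and returned as [1, 0, 0].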
class Dataset:
def __init__(self, text, target):
self.text = text
self.tokenizer = BertTokenizer.from_pretrained(args.pretrained_tokenizer_name)
self.max_length = args.max_length
self.target = target
def __len__(self):
return len(self.text)
def __getitem__(self, item):
text = str(self.text[item])
text = "".join(text.split())
inputs = self.tokenizer.encode_plus(
text = text,
padding = "max_length",
truncation = True,
max_length = self.max_length
)
input_ids = inputs["input_ids"]
token_type_ids = inputs["token_type_ids"]
attention_mask = inputs["attention_mask"]
return{
"input_ids":torch.tensor(input_ids,dtype = torch.long),
"attention_mask":torch.tensor(attention_mask, dtype = torch.long),
"token_type_ids":torch.tensor(token_type_ids, dtype = torch.long),
"target":torch.tensor(format_label(self.target[item]), dtype = torch.long)
}
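# Illustrative usage (assumed file, column names and batch size):
#   df = pd.read_csv(args.train_file)
#   ds = Dataset(text=df.text.values, target=df.label.values)
#   loader = torch.utils.data.DataLoader(ds, batch_size=16, shuffle=True)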
def generate_dsfile(path):
    df = pd.read_csv(path)
# coding: utf8
import torch
import numpy as np
import os
import warnings
import pandas as pd
from time import time
import logging
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from sklearn.utils import column_or_1d
import scipy.sparse as sp
from clinicadl.tools.deep_learning.iotools import check_and_clean
from clinicadl.tools.deep_learning import EarlyStopping, save_checkpoint
#####################
# CNN train / test #
#####################
def train(model, train_loader, valid_loader, criterion, optimizer, resume, log_dir, model_dir, options, logger=None):
"""
Function used to train a CNN.
The best model and checkpoint will be found in the 'best_model_dir' of options.output_dir.
Args:
model: (Module) CNN to be trained
train_loader: (DataLoader) wrapper of the training dataset
valid_loader: (DataLoader) wrapper of the validation dataset
criterion: (loss) function to calculate the loss
optimizer: (torch.optim) optimizer linked to model parameters
resume: (bool) if True, a begun job is resumed
log_dir: (str) path to the folder containing the logs
model_dir: (str) path to the folder containing the models weights and biases
options: (Namespace) ensemble of other options given to the main script.
logger: (logging object) writer to stdout and stderr
"""
from tensorboardX import SummaryWriter
from time import time
if logger is None:
logger = logging
columns = ['epoch', 'iteration', 'time',
'balanced_accuracy_train', 'loss_train',
'balanced_accuracy_valid', 'loss_valid']
if hasattr(model, "variational") and model.variational:
columns += ["kl_loss_train", "kl_loss_valid"]
filename = os.path.join(os.path.dirname(log_dir), 'training.tsv')
if not resume:
check_and_clean(model_dir)
check_and_clean(log_dir)
results_df = pd.DataFrame(columns=columns)
with open(filename, 'w') as f:
results_df.to_csv(f, index=False, sep='\t')
options.beginning_epoch = 0
else:
if not os.path.exists(filename):
raise ValueError('The training.tsv file of the resumed experiment does not exist.')
truncated_tsv = pd.read_csv(filename, sep='\t')
truncated_tsv.set_index(['epoch', 'iteration'], inplace=True)
truncated_tsv.drop(options.beginning_epoch, level=0, inplace=True)
truncated_tsv.to_csv(filename, index=True, sep='\t')
# Create writers
writer_train = SummaryWriter(os.path.join(log_dir, 'train'))
writer_valid = SummaryWriter(os.path.join(log_dir, 'validation'))
# Initialize variables
best_valid_accuracy = -1.0
best_valid_loss = np.inf
epoch = options.beginning_epoch
model.train() # set the model to training mode
train_loader.dataset.train()
early_stopping = EarlyStopping('min', min_delta=options.tolerance, patience=options.patience)
mean_loss_valid = None
t_beginning = time()
while epoch < options.epochs and not early_stopping.step(mean_loss_valid):
logger.info("Beginning epoch %i." % epoch)
model.zero_grad()
evaluation_flag = True
step_flag = True
tend = time()
total_time = 0
for i, data in enumerate(train_loader, 0):
t0 = time()
total_time = total_time + t0 - tend
if options.gpu:
imgs, labels = data['image'].cuda(), data['label'].cuda()
else:
imgs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, train_output = model(imgs)
kl_loss = kl_divergence(z, mu, std)
loss = criterion(train_output, labels) + kl_loss
else:
train_output = model(imgs)
loss = criterion(train_output, labels)
# Back propagation
loss.backward()
del imgs, labels
if (i + 1) % options.accumulation_steps == 0:
step_flag = False
optimizer.step()
optimizer.zero_grad()
del loss
# Evaluate the model only when no gradients are accumulated
if options.evaluation_steps != 0 and (i + 1) % options.evaluation_steps == 0:
evaluation_flag = False
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = i + epoch * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], i))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], i))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = pd.DataFrame([row], columns=columns)
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
tend = time()
logger.debug('Mean time per batch loading: %.10f s'
% (total_time / len(train_loader) * train_loader.batch_size))
# If no step has been performed, raise Exception
if step_flag:
raise Exception('The model has not been updated once in the epoch. The accumulation step may be too large.')
# If no evaluation has been performed, warn the user
elif evaluation_flag and options.evaluation_steps != 0:
warnings.warn('Your evaluation steps are too big compared to the size of the dataset.'
'The model is evaluated only once at the end of the epoch')
# Always test the results and save them once at the end of the epoch
model.zero_grad()
logger.debug('Last checkpoint at the end of the epoch %d' % epoch)
_, results_train = test(model, train_loader, options.gpu, criterion)
mean_loss_train = results_train["total_loss"] / (len(train_loader) * train_loader.batch_size)
_, results_valid = test(model, valid_loader, options.gpu, criterion)
mean_loss_valid = results_valid["total_loss"] / (len(valid_loader) * valid_loader.batch_size)
model.train()
train_loader.dataset.train()
global_step = (epoch + 1) * len(train_loader)
writer_train.add_scalar('balanced_accuracy', results_train["balanced_accuracy"], global_step)
writer_train.add_scalar('loss', mean_loss_train, global_step)
writer_valid.add_scalar('balanced_accuracy', results_valid["balanced_accuracy"], global_step)
writer_valid.add_scalar('loss', mean_loss_valid, global_step)
logger.info("%s level training accuracy is %f at the end of iteration %d"
% (options.mode, results_train["balanced_accuracy"], len(train_loader)))
logger.info("%s level validation accuracy is %f at the end of iteration %d"
% (options.mode, results_valid["balanced_accuracy"], len(train_loader)))
t_current = time() - t_beginning
row = [epoch, i, t_current,
results_train["balanced_accuracy"], mean_loss_train,
results_valid["balanced_accuracy"], mean_loss_valid]
if hasattr(model, "variational") and model.variational:
row += [results_train["total_kl_loss"] / (len(train_loader) * train_loader.batch_size),
results_valid["total_kl_loss"] / (len(valid_loader) * valid_loader.batch_size)]
row_df = pd.DataFrame([row], columns=columns)
with open(filename, 'a') as f:
row_df.to_csv(f, header=False, index=False, sep='\t')
accuracy_is_best = results_valid["balanced_accuracy"] > best_valid_accuracy
loss_is_best = mean_loss_valid < best_valid_loss
best_valid_accuracy = max(results_valid["balanced_accuracy"], best_valid_accuracy)
best_valid_loss = min(mean_loss_valid, best_valid_loss)
save_checkpoint({'model': model.state_dict(),
'epoch': epoch,
'valid_loss': mean_loss_valid,
'valid_acc': results_valid["balanced_accuracy"]},
accuracy_is_best, loss_is_best,
model_dir)
# Save optimizer state_dict to be able to reload
save_checkpoint({'optimizer': optimizer.state_dict(),
'epoch': epoch,
'name': options.optimizer,
},
False, False,
model_dir,
filename='optimizer.pth.tar')
epoch += 1
os.remove(os.path.join(model_dir, "optimizer.pth.tar"))
os.remove(os.path.join(model_dir, "checkpoint.pth.tar"))
def evaluate_prediction(y, y_pred):
"""
Evaluates different metrics based on the list of true labels and predicted labels.
Args:
y: (list) true labels
y_pred: (list) corresponding predictions
Returns:
(dict) ensemble of metrics
"""
true_positive = np.sum((y_pred == 1) & (y == 1))
true_negative = np.sum((y_pred == 0) & (y == 0))
false_positive = np.sum((y_pred == 1) & (y == 0))
false_negative = np.sum((y_pred == 0) & (y == 1))
accuracy = (true_positive + true_negative) / (true_positive + true_negative + false_positive + false_negative)
if (true_positive + false_negative) != 0:
sensitivity = true_positive / (true_positive + false_negative)
else:
sensitivity = 0.0
if (false_positive + true_negative) != 0:
specificity = true_negative / (false_positive + true_negative)
else:
specificity = 0.0
if (true_positive + false_positive) != 0:
ppv = true_positive / (true_positive + false_positive)
else:
ppv = 0.0
if (true_negative + false_negative) != 0:
npv = true_negative / (true_negative + false_negative)
else:
npv = 0.0
balanced_accuracy = (sensitivity + specificity) / 2
results = {'accuracy': accuracy,
'balanced_accuracy': balanced_accuracy,
'sensitivity': sensitivity,
'specificity': specificity,
'ppv': ppv,
'npv': npv,
}
return results
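# Minimal usage sketch of evaluate_prediction on toy labels (values assumed, not taken
# from the original pipeline):
# >>> y_true = np.array([1, 0, 1, 1, 0])
# >>> y_hat = np.array([1, 0, 0, 1, 1])
# >>> evaluate_prediction(y_true, y_hat)["balanced_accuracy"]
# 0.5833333333333333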
def test(model, dataloader, use_cuda, criterion, mode="image", use_labels=True):
"""
Computes the predictions and evaluation metrics.
Args:
model: (Module) CNN to be tested.
dataloader: (DataLoader) wrapper of a dataset.
use_cuda: (bool) if True a gpu is used.
criterion: (loss) function to calculate the loss.
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
use_labels (bool): If True the true_label will be written in output DataFrame and metrics dict will be created.
Returns
(DataFrame) results of each input.
(dict) ensemble of metrics + total loss on mode level.
"""
model.eval()
dataloader.dataset.eval()
if mode == "image":
columns = ["participant_id", "session_id", "true_label", "predicted_label"]
elif mode in ["patch", "roi", "slice"]:
columns = ['participant_id', 'session_id', '%s_id' % mode, 'true_label', 'predicted_label', 'proba0', 'proba1']
else:
raise ValueError("The mode %s is invalid." % mode)
softmax = torch.nn.Softmax(dim=1)
results_df = pd.DataFrame(columns=columns)
total_loss = 0
total_kl_loss = 0
total_time = 0
tend = time()
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
t0 = time()
total_time = total_time + t0 - tend
if use_cuda:
inputs, labels = data['image'].cuda(), data['label'].cuda()
else:
inputs, labels = data['image'], data['label']
if hasattr(model, "variational") and model.variational:
z, mu, std, outputs = model(inputs)
kl_loss = kl_divergence(z, mu, std)
total_kl_loss += kl_loss.item()
else:
outputs = model(inputs)
if use_labels:
loss = criterion(outputs, labels)
total_loss += loss.item()
_, predicted = torch.max(outputs.data, 1)
# Generate detailed DataFrame
for idx, sub in enumerate(data['participant_id']):
if mode == "image":
row = [[sub, data['session_id'][idx], labels[idx].item(), predicted[idx].item()]]
else:
normalized_output = softmax(outputs)
row = [[sub, data['session_id'][idx], data['%s_id' % mode][idx].item(),
labels[idx].item(), predicted[idx].item(),
normalized_output[idx, 0].item(), normalized_output[idx, 1].item()]]
row_df = pd.DataFrame(row, columns=columns)
results_df = pd.concat([results_df, row_df])
del inputs, outputs, labels
tend = time()
results_df.reset_index(inplace=True, drop=True)
if not use_labels:
results_df = results_df.drop("true_label", axis=1)
metrics_dict = None
else:
metrics_dict = evaluate_prediction(results_df.true_label.values.astype(int),
results_df.predicted_label.values.astype(int))
metrics_dict['total_loss'] = total_loss
metrics_dict['total_kl_loss'] = total_kl_loss
torch.cuda.empty_cache()
return results_df, metrics_dict
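# Hedged usage sketch of test(); `model`, `valid_loader` and `criterion` are assumed to be
# built elsewhere (as in the training loop above) and are not part of this snippet:
# >>> criterion = torch.nn.CrossEntropyLoss()
# >>> results_df, metrics = test(model, valid_loader, use_cuda=False, criterion=criterion, mode="image")
# >>> metrics["balanced_accuracy"], metrics["total_loss"]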
def sort_predicted(model, data_df, input_dir, model_options, criterion, keep_true,
batch_size=1, num_workers=0, gpu=False):
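# Filters data_df according to the model predictions: with keep_true=True only correctly
# classified samples are kept, with keep_true=False only misclassified ones, and
# keep_true=None returns data_df unchanged.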
from .data import return_dataset, get_transforms
from torch.utils.data import DataLoader
from copy import copy
if keep_true is None:
return data_df
_, all_transforms = get_transforms(model_options.mode, model_options.minmaxnormalization)
dataset = return_dataset(mode=model_options.mode, input_dir=input_dir,
data_df=data_df, preprocessing=model_options.preprocessing,
train_transformations=None, all_transformations=all_transforms,
params=model_options)
dataloader = DataLoader(dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True)
test_options = copy(model_options)
test_options.gpu = gpu
results_df, _ = test(model, dataloader, gpu, criterion, model_options.mode, use_labels=True)
sorted_df = data_df.sort_values(['participant_id', 'session_id']).reset_index(drop=True)
results_df = results_df.sort_values(['participant_id', 'session_id']).reset_index(drop=True)
if keep_true:
return sorted_df[results_df.true_label == results_df.predicted_label].reset_index(drop=True)
else:
return sorted_df[results_df.true_label != results_df.predicted_label].reset_index(drop=True)
#################################
# Voting systems
#################################
def mode_level_to_tsvs(output_dir, results_df, metrics, fold, selection, mode, dataset='train', cnn_index=None):
"""
Writes the outputs of the test function in tsv files.
Args:
output_dir: (str) path to the output directory.
results_df: (DataFrame) the individual results per patch.
metrics: (dict or DataFrame) the performances obtained on a series of metrics.
fold: (int) the fold for which the performances were obtained.
selection: (str) the metrics on which the model was selected (best_acc, best_loss)
mode: (str) input used by the network. Chosen from ['image', 'patch', 'roi', 'slice'].
dataset: (str) the dataset on which the evaluation was performed.
cnn_index: (int) provide the cnn_index only for a multi-cnn framework.
"""
if cnn_index is None:
performance_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', selection)
else:
performance_dir = os.path.join(output_dir, 'fold-%i' % fold, 'cnn_classification', 'cnn-%i' % cnn_index,
selection)
os.makedirs(performance_dir, exist_ok=True)
results_df.to_csv(os.path.join(performance_dir, '%s_%s_level_prediction.tsv' % (dataset, mode)), index=False,
sep='\t')
if metrics is not None:
metrics["%s_id" % mode] = cnn_index
if isinstance(metrics, dict):
pd.DataFrame(metrics, index=[0]).to_csv(os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode)),
index=False, sep='\t')
elif isinstance(metrics, pd.DataFrame):
metrics.to_csv(os.path.join(performance_dir, '%s_%s_level_metrics.tsv' % (dataset, mode)),
index=False, sep='\t')
else:
raise ValueError("Bad type for metrics: %s. Must be dict or DataFrame." % type(metrics).__name__)
def concat_multi_cnn_results(output_dir, fold, selection, mode, dataset, num_cnn):
"""Concatenate the tsv files of a multi-CNN framework"""
prediction_df = pd.DataFrame()
metrics_df = pd.DataFrame()
import numpy as np
from datetime import timedelta
import pandas as pd
import pandas.tslib as tslib
import pandas.util.testing as tm
import pandas.tseries.period as period
from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period,
_np_version_under1p10, Index, Timedelta, offsets)
from pandas.tests.test_base import Ops
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
self.assertEqual(idx1.argmin(), 1)
self.assertEqual(idx2.argmin(), 0)
self.assertEqual(idx1.argmax(), 3)
self.assertEqual(idx2.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertIs(result, tslib.NaT)
def test_numpy_minmax(self):
pr = pd.period_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(pr), Period('2016-01-15', freq='D'))
self.assertEqual(np.max(pr), Period('2016-01-20', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, pr, out=0)
self.assertEqual(np.argmin(pr), 0)
self.assertEqual(np.argmax(pr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, pr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, pr, out=0)
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00',
'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
idx10 = PeriodIndex(['2011-01-01', '2011-02-01'], freq='3D')
exp1 = """PeriodIndex([], dtype='period[D]', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"""
exp3 = ("PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', "
"freq='D')")
exp4 = ("PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='period[D]', freq='D')")
exp5 = ("PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
"freq='A-DEC')")
exp6 = ("PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
"dtype='period[H]', freq='H')")
exp7 = ("PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp8 = ("PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', "
"freq='Q-DEC')")
exp9 = ("PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
"dtype='period[Q-DEC]', freq='Q-DEC')")
exp10 = ("PeriodIndex(['2011-01-01', '2011-02-01'], "
"dtype='period[3D]', freq='3D')")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5,
idx6, idx7, idx8, idx9, idx10],
[exp1, exp2, exp3, exp4, exp5,
exp6, exp7, exp8, exp9, exp10]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
import numpy as np
import pandas as pd
import math
import matplotlib
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import scipy.spatial as ss
import seaborn
from ..logging import info
from ..read_export import load_anndata
def bandwidth_nrd(x):
x = pd.Series(x)
h = (x.quantile(0.75) - x.quantile(0.25)) / 1.34  # scalar IQR / 1.34, as in R's bw.nrd
return 4 * 1.06 * min(math.sqrt(np.var(x, ddof=1)), h) * (len(x) ** (-1 / 5))
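# Hedged usage sketch (sample data assumed): bandwidth_nrd reproduces R's normal
# reference rule, 4 * 1.06 * min(sd, IQR / 1.34) * n ** (-1 / 5).
# >>> np.random.seed(0)
# >>> sample = np.random.normal(size=100)
# >>> bandwidth_nrd(sample) > 0
# True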
def rep(x, length):
len_x = len(x)
n = int(length / len_x)
r = length % len_x
re = []
for i in range(0, n):
re = re + x
for i in range(0, r):
re = re + [x[i]]
return re
# https://stackoverflow.com/questions/46166933/python-numpy-equivalent-of-r-rep-and-rep-len-functions?rq=1
# def rep2(x, length):
# x = np.array(x)
# res = np.repeat(x, length, axis=0)
# return res
def rep2(x, length_out):
return np.tile(x, length_out // len(x) + 1)[:length_out]
def dnorm(x, u=0, sig=1):
return np.exp(-(x - u) ** 2 / (2 * sig ** 2)) / (math.sqrt(2 * math.pi) * sig)
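# dnorm mirrors R's dnorm (the normal pdf); e.g. dnorm(0) == 1 / math.sqrt(2 * math.pi) ≈ 0.3989.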
def kde2d(x, y, h=None, n=25, lims=None):
"""Reproduce kde2d function behavior from MASS package in R.
Two-dimensional kernel density estimation with an axis-aligned
bivariate normal kernel, evaluated on a square grid.
Arguments
---------
x: `List`
x coordinate of data
y: `List`
y coordinate of data
h: `List` (Default: None)
vector of bandwidths for :math:`x` and :math:`y` directions. Defaults to normal reference bandwidth
(see `bandwidth.nrd`). A scalar value will be taken to apply to both directions.
n: `int` (Default: 25)
Number of grid points in each direction. Can be scalar or a length-2 integer list.
lims: `List` (Default: None)
The limits of the rectangle covered by the grid as :math:`x_l, x_u, y_l, y_u`.
Returns
-------
A list of three components
gx, gy: `List`
The x and y coordinates of the grid points, lists of length `n`.
z: `List`
An :math:`n[1]` by :math:`n[2]` matrix of the estimated density: rows correspond to the value of :math:`x`,
columns to the value of :math:`y`.
"""
nx = len(x)
if not lims:
lims = [min(x), max(x), min(y), max(y)]
if (len(y) != nx):
raise Exception("data vectors must be the same length")
elif ((False in np.isfinite(x)) or (False in np.isfinite(y))):
raise Exception("missing or infinite values in the data are not allowed")
elif (False in np.isfinite(lims)):
raise Exception("only finite values are allowed in 'lims'")
else:
n = rep(n, length=2) if isinstance(n, list) else rep([n], length=2)
gx = np.linspace(lims[0], lims[1], n[0])
gy = np.linspace(lims[2], lims[3], n[1])
if h is None:
h = np.array([bandwidth_nrd(x), bandwidth_nrd(y)], dtype=float)  # ndarray so that h /= 4 below works
else:
h = np.array(rep(h, length=2))
if h[0] <= 0 or h[1] <= 0:
raise Exception("bandwidths must be strictly positive")
else:
h /= 4
ax = pd.DataFrame()
ay = pd.DataFrame()
for i in range(len(x)):
ax[i] = (gx - x[i]) / h[0]
for i in range(len(y)):
ay[i] = (gy - y[i]) / h[1]
z = (np.matrix(dnorm(ax)) * np.matrix(dnorm(ay).T)) / (nx * h[0] * h[1])
return gx, gy, z
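# Hedged usage sketch of kde2d (sample data assumed, not from the original module):
# >>> np.random.seed(0)
# >>> xs = np.random.normal(size=200)
# >>> ys = 0.5 * xs + np.random.normal(scale=0.5, size=200)
# >>> gx, gy, z = kde2d(xs, ys, n=25)
# >>> len(gx), len(gy), z.shape
# (25, 25, (25, 25))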
# understand the logging information and use that for verbose
def viz_response(adata, pairs_mat, log=False, delay=1, k=5, grid_num=25, n_row=None, n_col=1, scales="free", return_data = False,
verbose=False):
"""Plot the lagged DREVI plot pairs of genes across pseudotime.
This plotting function builds on the original idea of DREVI plot but is extended in the context for causal network.
It considers the time delay between the hypothetical regulators to the target genes which is parametered by :math:`d`.
Lagged DREVI plot first estimates the joint density (:math:`P(x_{t - d}, y_t)`) for variables :math:`x_{t - d} and y_t`, then it
divides the joint density by the marginal density :math:`P(x_{t - d})` to get the conditional density estimate
(:math:`P(x_{t - d}, y_t | x_{x - d})`). We then calculate the z-score normalizing each column of conditional density. Note
that this plot tries to demonstrate the potential influence between two variables instead of the factual influence.
A red line corresponding to the point with maximal density on each :math:`x` value is plot which indicates the maximal possible
point for :math:`y_t` give the value of :math:`x_{t - d}`. The 2-d density is estimated through the kde2d function.
Arguments
---------
adata: `Anndata`
Annotated Data Frame, an Anndata object.
pairs_mat: 'np.ndarray'
A matrix where each row is the gene pair and the first column is the hypothetical source or regulator while
the second column represents the hypothetical target. The name in this matrix should match the name in the
gene_short_name column of the adata object.
log: `bool` (Default: False)
A logic argument used to determine whether or not a log transformation (using :math:`log(expression + 1)`) should be applied
before calculating density estimates. Defaults to False.
delay: `int` (Default: 1)
The time delay between the source and target gene.
k: `int` (Default: 5)
Number of k-nearest neighbors used in calculating 2-D kernel density
grid_num: `int` (Default: 25)
The number of grid when creating the lagged DREVI plot.
n_row: `int` (Default: None)
number of rows used to lay out the faceted cluster panels.
n_col: `int` (Default: 1)
number of columns used to lay out the faceted cluster panels.
scales: `str` (Default: 'free')
The character string passed to facet function, determines whether or not the scale is fixed or free in
different dimensions. (not used)
verbose: `bool` (Default: False)
A logic argument to determine whether or not detailed running information should be printed.
Returns
-------
In addition to figure created by matplotlib, it also returns:
flat_res: 'pd.core.frame.DataFrame'
a pandas data frame used to create the heatmap with four columns (`x`: x-coordinate; `y`: y-coordinate; `den`:
estimated density at x/y coordinate; `type`: the corresponding gene pair).
flat_res_subset: 'pd.core.frame.DataFrame'
a pandas data frame used to create the heatmap for the last gene pair (if multiple gene-pairs are inputted) with
four columns (`x`: x-coordinate; `y`: y-coordinate; `den`: estimated density at x/y coordinate; `type`: the
corresponding gene pair).
ridge_curve_subset: 'pd.core.frame.DataFrame'
a pandas data frame used to create the read ridge line for the last gene pair (if multiple gene-pairs are inputted) with
four columns (`x`: x-coordinate; `y`: y-coordinate; `type`: the corresponding gene pair).
"""
model = load_anndata(adata)
data = model.X # pd.DataFrame(model.expression.values,index = adata.var_names)
all_genes_in_pair = np.unique(pairs_mat)
if (not (set(all_genes_in_pair) <= set(data.index.values))):
raise Exception(
"adata doesn't include all genes in gene_pairs_mat. Make sure all genes are included in gene_short_name column of the obs property of adata.")
sub_data = data.loc[all_genes_in_pair, :]
if grid_num == None:
dim_val = (round((len(sub_data) - delay) / 4))
else:
dim_val = grid_num
flat_res = pd.DataFrame(columns=["x", "y", "den", "type"])